Initialize project
10
.dockerignore
Normal file
@@ -0,0 +1,10 @@
node_modules
dist
.git
.gitignore
.env
.DS_Store
*.log
coverage
.vscode
README.md
24
.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
685
App.tsx
Normal file
@@ -0,0 +1,685 @@
|
||||
import React, { useState, useEffect, useRef } from 'react';
|
||||
import {
|
||||
Settings as SettingsIcon,
|
||||
MessageSquare,
|
||||
Sparkles,
|
||||
Menu,
|
||||
X,
|
||||
Mic,
|
||||
ImagePlus,
|
||||
Send,
|
||||
Loader2,
|
||||
Volume2,
|
||||
Trash2,
|
||||
Plus,
|
||||
BookOpen,
|
||||
Brain,
|
||||
GraduationCap,
|
||||
Coffee
|
||||
} from 'lucide-react';
|
||||
import ReactMarkdown from 'react-markdown';
|
||||
import { TRANSLATIONS, DEFAULT_LANGUAGE } from './constants';
|
||||
import { AppLanguage, ChatMode, Message, UserSettings, ChatSession, ChatScenario } from './types';
|
||||
import { loadSettings, saveSettings, loadSessions, saveSessions, exportData, importData, clearData } from './services/storage';
|
||||
import { streamChatResponse, transcribeAudio, generateSpeech } from './services/geminiService';
|
||||
import Tools from './components/Tools';
|
||||
|
||||
const App: React.FC = () => {
|
||||
// State
|
||||
const [settings, setSettingsState] = useState<UserSettings>(loadSettings());
|
||||
const [sessions, setSessions] = useState<ChatSession[]>([]);
|
||||
const [currentSessionId, setCurrentSessionId] = useState<string | null>(null);
|
||||
const [input, setInput] = useState('');
|
||||
const [isSidebarOpen, setIsSidebarOpen] = useState(false);
|
||||
const [activeView, setActiveView] = useState<'chat' | 'tools' | 'settings'>('chat');
|
||||
const [isProcessing, setIsProcessing] = useState(false);
|
||||
const [streamingContent, setStreamingContent] = useState(''); // For real-time effect
|
||||
const [attachments, setAttachments] = useState<{mimeType: string, data: string, name?: string}[]>([]);
|
||||
const [isRecording, setIsRecording] = useState(false);
|
||||
|
||||
// Refs
|
||||
const messagesEndRef = useRef<HTMLDivElement>(null);
|
||||
const fileInputRef = useRef<HTMLInputElement>(null);
|
||||
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
|
||||
|
||||
const t = TRANSLATIONS[settings.language] || TRANSLATIONS[DEFAULT_LANGUAGE];
|
||||
|
||||
// Effects
|
||||
useEffect(() => {
|
||||
const loadedSessions = loadSessions();
|
||||
setSessions(loadedSessions);
|
||||
// Restore the most recent session on load if one exists; otherwise the welcome screen is shown
|
||||
if (loadedSessions.length > 0 && !currentSessionId) {
|
||||
setCurrentSessionId(loadedSessions[0].id);
|
||||
}
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
saveSettings(settings);
|
||||
}, [settings]);
|
||||
|
||||
useEffect(() => {
|
||||
saveSessions(sessions);
|
||||
}, [sessions]);
|
||||
|
||||
useEffect(() => {
|
||||
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
|
||||
}, [sessions, streamingContent, currentSessionId]);
|
||||
|
||||
// Helpers
|
||||
const getCurrentSession = () => sessions.find(s => s.id === currentSessionId);
|
||||
|
||||
const handleNewChatClick = () => {
|
||||
setCurrentSessionId(null);
|
||||
setActiveView('chat');
|
||||
setIsSidebarOpen(false);
|
||||
};
|
||||
|
||||
const startScenarioSession = (scenario: ChatScenario) => {
|
||||
const scenarioConfig = t.scenarios[scenario];
|
||||
const initialGreeting = scenarioConfig.greeting;
|
||||
|
||||
const newSession: ChatSession = {
|
||||
id: Date.now().toString(),
|
||||
title: scenarioConfig.title,
|
||||
messages: [
|
||||
{
|
||||
id: Date.now().toString(),
|
||||
role: 'model',
|
||||
content: initialGreeting,
|
||||
timestamp: Date.now()
|
||||
}
|
||||
],
|
||||
mode: ChatMode.STANDARD, // Default mode, can be changed
|
||||
scenario: scenario,
|
||||
createdAt: Date.now()
|
||||
};
|
||||
|
||||
setSessions([newSession, ...sessions]);
|
||||
setCurrentSessionId(newSession.id);
|
||||
};
|
||||
|
||||
const updateCurrentSession = (updater: (session: ChatSession) => ChatSession) => {
|
||||
setSessions(prev => prev.map(s => s.id === currentSessionId ? updater(s) : s));
|
||||
};
|
||||
|
||||
const getScenarioIcon = (scenario?: ChatScenario) => {
|
||||
switch (scenario) {
|
||||
case ChatScenario.READING: return <BookOpen size={18} />;
|
||||
case ChatScenario.CONCEPT: return <Brain size={18} />;
|
||||
case ChatScenario.RESEARCH: return <GraduationCap size={18} />;
|
||||
case ChatScenario.GENERAL:
|
||||
default: return <Coffee size={18} />;
|
||||
}
|
||||
};
|
||||
|
||||
// Handlers
|
||||
const handleSendMessage = async () => {
|
||||
if ((!input.trim() && attachments.length === 0) || isProcessing || !currentSessionId) return;
|
||||
|
||||
const session = getCurrentSession();
|
||||
if (!session) return;
|
||||
|
||||
const userMsg: Message = {
|
||||
id: Date.now().toString(),
|
||||
role: 'user',
|
||||
content: input,
|
||||
timestamp: Date.now(),
|
||||
attachments: attachments.map(a => ({ type: 'image', ...a })) // Simplify type for now
|
||||
};
|
||||
|
||||
updateCurrentSession(s => ({ ...s, messages: [...s.messages, userMsg] }));
|
||||
setInput('');
|
||||
setAttachments([]);
|
||||
setIsProcessing(true);
|
||||
setStreamingContent('');
|
||||
|
||||
try {
|
||||
let fullResponse = '';
|
||||
let groundingData: any = null;
|
||||
|
||||
await streamChatResponse(
|
||||
[...session.messages, userMsg],
|
||||
userMsg.content,
|
||||
session.mode,
|
||||
settings.language,
|
||||
session.scenario || ChatScenario.GENERAL,
|
||||
userMsg.attachments as any,
|
||||
(text, grounding) => {
|
||||
fullResponse += text;
|
||||
setStreamingContent(fullResponse);
|
||||
if (grounding) groundingData = grounding;
|
||||
}
|
||||
);
|
||||
|
||||
const modelMsg: Message = {
|
||||
id: (Date.now() + 1).toString(),
|
||||
role: 'model',
|
||||
content: fullResponse,
|
||||
timestamp: Date.now(),
|
||||
groundingMetadata: groundingData
|
||||
};
|
||||
|
||||
updateCurrentSession(s => ({ ...s, messages: [...s.messages, modelMsg] }));
|
||||
|
||||
// Auto-update title if it's the first USER interaction (second message total due to greeting)
|
||||
if (session.messages.length === 1) { // 1 existing message (the greeting)
|
||||
const newTitle = userMsg.content.slice(0, 30) || t.newChat;
|
||||
updateCurrentSession(s => ({ ...s, title: newTitle }));
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
console.error(err);
|
||||
const errorMsg: Message = {
|
||||
id: Date.now().toString(),
|
||||
role: 'model',
|
||||
content: "Error: Could not generate response. Please check if API key is configured in environment.",
|
||||
timestamp: Date.now()
|
||||
};
|
||||
updateCurrentSession(s => ({ ...s, messages: [...s.messages, errorMsg] }));
|
||||
} finally {
|
||||
setIsProcessing(false);
|
||||
setStreamingContent('');
|
||||
}
|
||||
};
|
||||
|
||||
const handleFileUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const file = e.target.files?.[0];
|
||||
if (!file) return;
|
||||
|
||||
const reader = new FileReader();
|
||||
reader.onloadend = () => {
|
||||
const base64String = reader.result as string;
|
||||
const base64Data = base64String.split(',')[1];
|
||||
setAttachments(prev => [...prev, {
|
||||
mimeType: file.type,
|
||||
data: base64Data,
|
||||
name: file.name
|
||||
}]);
|
||||
};
|
||||
reader.readAsDataURL(file);
|
||||
e.target.value = ''; // Reset input
|
||||
};
|
||||
|
||||
const handleRecordAudio = async () => {
|
||||
if (isRecording) {
|
||||
mediaRecorderRef.current?.stop();
|
||||
setIsRecording(false);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
const mediaRecorder = new MediaRecorder(stream);
|
||||
mediaRecorderRef.current = mediaRecorder;
|
||||
const chunks: BlobPart[] = [];
|
||||
|
||||
mediaRecorder.ondataavailable = (e) => chunks.push(e.data);
|
||||
mediaRecorder.onstop = async () => {
|
||||
const blob = new Blob(chunks, { type: 'audio/webm' }); // Chrome default
|
||||
// Convert blob to base64
|
||||
const reader = new FileReader();
|
||||
reader.onloadend = async () => {
|
||||
const base64 = (reader.result as string).split(',')[1];
|
||||
|
||||
setIsProcessing(true); // Re-use processing state for spinner on button
|
||||
try {
|
||||
const text = await transcribeAudio(base64, 'audio/webm');
|
||||
setInput(prev => prev + " " + text);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
alert("Transcription failed");
|
||||
} finally {
|
||||
setIsProcessing(false);
|
||||
}
|
||||
};
|
||||
reader.readAsDataURL(blob);
|
||||
stream.getTracks().forEach(track => track.stop());
|
||||
};
|
||||
|
||||
mediaRecorder.start();
|
||||
setIsRecording(true);
|
||||
} catch (e) {
|
||||
console.error("Mic error", e);
|
||||
alert("Microphone access denied or not available.");
|
||||
}
|
||||
};
|
||||
|
||||
const playTTS = async (text: string) => {
|
||||
try {
|
||||
const buffer = await generateSpeech(text);
|
||||
const ctx = new (window.AudioContext || (window as any).webkitAudioContext)();
|
||||
const source = ctx.createBufferSource();
|
||||
source.buffer = buffer;
|
||||
source.connect(ctx.destination);
|
||||
source.start(0);
|
||||
} catch (e) {
|
||||
console.error("TTS Error", e);
|
||||
}
|
||||
};
|
||||
|
||||
const handleImport = async (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const file = e.target.files?.[0];
|
||||
if (file) {
|
||||
const success = await importData(file);
|
||||
if (success) {
|
||||
setSettingsState(loadSettings());
|
||||
setSessions(loadSessions());
|
||||
alert("Import successful!");
|
||||
} else {
|
||||
alert("Import failed.");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const currentSession = getCurrentSession();
|
||||
|
||||
return (
|
||||
<div className="flex h-screen bg-slate-50 overflow-hidden">
|
||||
{/* Sidebar - Mobile Overlay */}
|
||||
{isSidebarOpen && (
|
||||
<div className="fixed inset-0 bg-black/50 z-20 md:hidden" onClick={() => setIsSidebarOpen(false)} />
|
||||
)}
|
||||
|
||||
{/* Sidebar */}
|
||||
<aside className={`
|
||||
fixed inset-y-0 left-0 z-30 w-64 bg-white border-r border-slate-200 transform transition-transform duration-300 ease-in-out
|
||||
md:relative md:translate-x-0
|
||||
${isSidebarOpen ? 'translate-x-0' : '-translate-x-full'}
|
||||
`}>
|
||||
<div className="flex flex-col h-full">
|
||||
<div className="p-4 border-b border-slate-100 flex items-center justify-between">
|
||||
<h1 className="font-bold text-xl text-blue-600 flex items-center gap-2">
|
||||
<span className="bg-blue-100 p-1.5 rounded-lg"><Sparkles size={18}/></span>
|
||||
{t.appName}
|
||||
</h1>
|
||||
<button onClick={() => setIsSidebarOpen(false)} className="md:hidden text-slate-500">
|
||||
<X size={20} />
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div className="p-4">
|
||||
<button
|
||||
onClick={handleNewChatClick}
|
||||
className="w-full flex items-center justify-center gap-2 bg-blue-600 text-white py-2.5 rounded-xl hover:bg-blue-700 transition shadow-sm font-medium"
|
||||
>
|
||||
<Plus size={18} />
|
||||
{t.newChat}
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div className="flex-1 overflow-y-auto px-4 space-y-1">
|
||||
<div className="text-xs font-semibold text-slate-400 uppercase tracking-wider mb-2 mt-2">History</div>
|
||||
{sessions.map(s => (
|
||||
<button
|
||||
key={s.id}
|
||||
onClick={() => {
|
||||
setCurrentSessionId(s.id);
|
||||
setActiveView('chat');
|
||||
setIsSidebarOpen(false);
|
||||
}}
|
||||
className={`w-full text-left p-3 rounded-lg text-sm truncate transition flex items-center gap-2 ${currentSessionId === s.id && activeView === 'chat' ? 'bg-blue-50 text-blue-700 font-medium' : 'text-slate-600 hover:bg-slate-50'}`}
|
||||
>
|
||||
<span className="opacity-70">{getScenarioIcon(s.scenario)}</span>
|
||||
<span className="truncate">{s.title}</span>
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
|
||||
<div className="p-4 border-t border-slate-100 space-y-1">
|
||||
<button
|
||||
onClick={() => setActiveView('tools')}
|
||||
className={`w-full flex items-center gap-3 p-3 rounded-lg text-sm font-medium transition ${activeView === 'tools' ? 'bg-slate-100 text-slate-900' : 'text-slate-600 hover:bg-slate-50'}`}
|
||||
>
|
||||
<ImagePlus size={18} />
|
||||
{t.tools}
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setActiveView('settings')}
|
||||
className={`w-full flex items-center gap-3 p-3 rounded-lg text-sm font-medium transition ${activeView === 'settings' ? 'bg-slate-100 text-slate-900' : 'text-slate-600 hover:bg-slate-50'}`}
|
||||
>
|
||||
<SettingsIcon size={18} />
|
||||
{t.settings}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</aside>
|
||||
|
||||
{/* Main Content */}
|
||||
<main className="flex-1 flex flex-col h-full w-full relative">
|
||||
{/* Header */}
|
||||
<header className="h-16 bg-white border-b border-slate-100 flex items-center px-4 justify-between shrink-0">
|
||||
<div className="flex items-center gap-2">
|
||||
<button onClick={() => setIsSidebarOpen(true)} className="md:hidden p-2 text-slate-600">
|
||||
<Menu size={24} />
|
||||
</button>
|
||||
{activeView === 'chat' && currentSession && (
|
||||
<div className="flex items-center gap-2 text-slate-700 font-medium">
|
||||
<span className="text-blue-600 bg-blue-50 p-1 rounded-md">{getScenarioIcon(currentSession.scenario)}</span>
|
||||
<span className="hidden sm:inline">{t.scenarios[currentSession.scenario || ChatScenario.GENERAL].title}</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Mode Switcher (Only visible in chat) */}
|
||||
{activeView === 'chat' && currentSession && (
|
||||
<div className="flex items-center space-x-2 bg-slate-100 p-1 rounded-lg">
|
||||
<button
|
||||
onClick={() => updateCurrentSession(s => ({...s, mode: ChatMode.STANDARD}))}
|
||||
className={`px-3 py-1 text-xs font-medium rounded-md transition ${currentSession.mode === ChatMode.STANDARD ? 'bg-white shadow text-blue-600' : 'text-slate-500'}`}
|
||||
>
|
||||
Search
|
||||
</button>
|
||||
<button
|
||||
onClick={() => updateCurrentSession(s => ({...s, mode: ChatMode.DEEP}))}
|
||||
className={`px-3 py-1 text-xs font-medium rounded-md transition ${currentSession.mode === ChatMode.DEEP ? 'bg-white shadow text-purple-600' : 'text-slate-500'}`}
|
||||
>
|
||||
Reasoning
|
||||
</button>
|
||||
<button
|
||||
onClick={() => updateCurrentSession(s => ({...s, mode: ChatMode.FAST}))}
|
||||
className={`px-3 py-1 text-xs font-medium rounded-md transition ${currentSession.mode === ChatMode.FAST ? 'bg-white shadow text-green-600' : 'text-slate-500'}`}
|
||||
>
|
||||
Fast
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
</header>
|
||||
|
||||
{/* View Content */}
|
||||
<div className="flex-1 overflow-hidden relative">
|
||||
|
||||
{/* Chat View */}
|
||||
{activeView === 'chat' && (
|
||||
<div className="flex flex-col h-full">
|
||||
{!currentSession ? (
|
||||
<div className="flex-1 flex flex-col items-center justify-center bg-slate-50 p-4 overflow-y-auto">
|
||||
<div className="max-w-4xl w-full text-center space-y-8">
|
||||
<div className="space-y-2">
|
||||
<div className="bg-blue-100 w-16 h-16 rounded-2xl flex items-center justify-center mx-auto text-blue-600 mb-4">
|
||||
<Sparkles size={32} />
|
||||
</div>
|
||||
<h2 className="text-2xl font-bold text-slate-800">{t.welcome}</h2>
|
||||
<p className="text-slate-500">{t.tagline}</p>
|
||||
</div>
|
||||
|
||||
<div className="grid grid-cols-1 sm:grid-cols-2 gap-4 text-left">
|
||||
{/* Daily Q&A */}
|
||||
<button
|
||||
onClick={() => startScenarioSession(ChatScenario.GENERAL)}
|
||||
className="bg-white p-6 rounded-2xl shadow-sm border border-slate-100 hover:border-blue-300 hover:shadow-md transition group"
|
||||
>
|
||||
<div className="flex items-start justify-between mb-4">
|
||||
<div className="bg-orange-100 p-3 rounded-xl text-orange-600 group-hover:bg-orange-600 group-hover:text-white transition">
|
||||
<Coffee size={24} />
|
||||
</div>
|
||||
<span className="text-xs font-medium bg-slate-100 text-slate-600 px-2 py-1 rounded-full">Basics</span>
|
||||
</div>
|
||||
<h3 className="font-bold text-lg text-slate-800 mb-2">{t.scenarios.general.title}</h3>
|
||||
<p className="text-sm text-slate-500 leading-relaxed">{t.scenarios.general.desc}</p>
|
||||
</button>
|
||||
|
||||
{/* Classic Reading */}
|
||||
<button
|
||||
onClick={() => startScenarioSession(ChatScenario.READING)}
|
||||
className="bg-white p-6 rounded-2xl shadow-sm border border-slate-100 hover:border-blue-300 hover:shadow-md transition group"
|
||||
>
|
||||
<div className="flex items-start justify-between mb-4">
|
||||
<div className="bg-purple-100 p-3 rounded-xl text-purple-600 group-hover:bg-purple-600 group-hover:text-white transition">
|
||||
<BookOpen size={24} />
|
||||
</div>
|
||||
<span className="text-xs font-medium bg-slate-100 text-slate-600 px-2 py-1 rounded-full">Theory</span>
|
||||
</div>
|
||||
<h3 className="font-bold text-lg text-slate-800 mb-2">{t.scenarios.reading.title}</h3>
|
||||
<p className="text-sm text-slate-500 leading-relaxed">{t.scenarios.reading.desc}</p>
|
||||
</button>
|
||||
|
||||
{/* Concept */}
|
||||
<button
|
||||
onClick={() => startScenarioSession(ChatScenario.CONCEPT)}
|
||||
className="bg-white p-6 rounded-2xl shadow-sm border border-slate-100 hover:border-blue-300 hover:shadow-md transition group"
|
||||
>
|
||||
<div className="flex items-start justify-between mb-4">
|
||||
<div className="bg-blue-100 p-3 rounded-xl text-blue-600 group-hover:bg-blue-600 group-hover:text-white transition">
|
||||
<Brain size={24} />
|
||||
</div>
|
||||
<span className="text-xs font-medium bg-slate-100 text-slate-600 px-2 py-1 rounded-full">Deep Dive</span>
|
||||
</div>
|
||||
<h3 className="font-bold text-lg text-slate-800 mb-2">{t.scenarios.concept.title}</h3>
|
||||
<p className="text-sm text-slate-500 leading-relaxed">{t.scenarios.concept.desc}</p>
|
||||
</button>
|
||||
|
||||
{/* Research */}
|
||||
<button
|
||||
onClick={() => startScenarioSession(ChatScenario.RESEARCH)}
|
||||
className="bg-white p-6 rounded-2xl shadow-sm border border-slate-100 hover:border-blue-300 hover:shadow-md transition group"
|
||||
>
|
||||
<div className="flex items-start justify-between mb-4">
|
||||
<div className="bg-green-100 p-3 rounded-xl text-green-600 group-hover:bg-green-600 group-hover:text-white transition">
|
||||
<GraduationCap size={24} />
|
||||
</div>
|
||||
<span className="text-xs font-medium bg-slate-100 text-slate-600 px-2 py-1 rounded-full">Advanced</span>
|
||||
</div>
|
||||
<h3 className="font-bold text-lg text-slate-800 mb-2">{t.scenarios.research.title}</h3>
|
||||
<p className="text-sm text-slate-500 leading-relaxed">{t.scenarios.research.desc}</p>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<div className="flex-1 overflow-y-auto p-4 space-y-6">
|
||||
{currentSession.messages.map((msg, idx) => (
|
||||
<div key={msg.id} className={`flex ${msg.role === 'user' ? 'justify-end' : 'justify-start'}`}>
|
||||
{msg.role === 'model' && (
|
||||
<div className="mr-3 flex-shrink-0 mt-1">
|
||||
<div className="w-8 h-8 rounded-full bg-blue-100 flex items-center justify-center text-blue-600">
|
||||
{getScenarioIcon(currentSession.scenario)}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
<div className={`max-w-[85%] md:max-w-[70%] space-y-1`}>
|
||||
{/* Attachments */}
|
||||
{msg.attachments?.map((att, i) => (
|
||||
<div key={i} className="mb-2">
|
||||
<img src={`data:${att.mimeType};base64,${att.data}`} alt="attachment" className="max-h-48 rounded-lg shadow-sm border border-slate-100" />
|
||||
</div>
|
||||
))}
|
||||
|
||||
{/* Bubble */}
|
||||
<div className={`p-4 rounded-2xl shadow-sm text-sm md:text-base leading-relaxed ${
|
||||
msg.role === 'user'
|
||||
? 'bg-blue-600 text-white rounded-tr-none'
|
||||
: 'bg-white border border-slate-100 text-slate-800 rounded-tl-none'
|
||||
}`}>
|
||||
<ReactMarkdown className="prose prose-sm max-w-none dark:prose-invert">
|
||||
{msg.content}
|
||||
</ReactMarkdown>
</div>
|
||||
</div>
|
||||
|
||||
{/* Metadata / Actions */}
|
||||
<div className="flex items-center gap-2 text-xs text-slate-400 px-1">
|
||||
<span>{new Date(msg.timestamp).toLocaleTimeString()}</span>
|
||||
{msg.role === 'model' && (
|
||||
<>
|
||||
<button onClick={() => playTTS(msg.content)} className="hover:text-blue-500"><Volume2 size={14}/></button>
|
||||
{msg.groundingMetadata?.groundingChunks && msg.groundingMetadata.groundingChunks.length > 0 && (
|
||||
<span className="flex items-center gap-1 text-green-600">
|
||||
<span className="w-1.5 h-1.5 rounded-full bg-green-500"></span>
|
||||
{t.searchSources}
|
||||
</span>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Sources List */}
|
||||
{msg.role === 'model' && msg.groundingMetadata?.groundingChunks && (
|
||||
<div className="mt-2 text-xs bg-slate-50 p-2 rounded-lg border border-slate-100">
|
||||
<p className="font-medium mb-1">{t.searchSources}:</p>
|
||||
<ul className="list-disc pl-4 space-y-1">
|
||||
{msg.groundingMetadata.groundingChunks.map((chunk, i) => chunk.web && (
|
||||
<li key={i}>
|
||||
<a href={chunk.web.uri} target="_blank" rel="noreferrer" className="text-blue-500 hover:underline truncate block max-w-xs">
|
||||
{chunk.web.title}
|
||||
</a>
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{/* Streaming Pending State */}
|
||||
{isProcessing && streamingContent && (
|
||||
<div className="flex justify-start">
|
||||
<div className="mr-3 flex-shrink-0 mt-1">
|
||||
<div className="w-8 h-8 rounded-full bg-blue-100 flex items-center justify-center text-blue-600">
|
||||
{getScenarioIcon(currentSession.scenario)}
|
||||
</div>
|
||||
</div>
|
||||
<div className="max-w-[85%] md:max-w-[70%] bg-white border border-slate-100 p-4 rounded-2xl rounded-tl-none shadow-sm">
|
||||
<ReactMarkdown className="prose prose-sm max-w-none text-slate-800">
|
||||
{streamingContent}
|
||||
</ReactMarkdown>
</div>
|
||||
<div className="mt-2 flex items-center gap-2 text-xs text-blue-500 animate-pulse">
|
||||
<Loader2 size={12} className="animate-spin" />
|
||||
{currentSession?.mode === ChatMode.DEEP ? t.thinking : t.generating}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
<div ref={messagesEndRef} />
|
||||
</div>
|
||||
|
||||
{/* Input Area */}
|
||||
<div className="p-4 bg-white border-t border-slate-100">
|
||||
<div className="max-w-4xl mx-auto flex flex-col gap-2">
|
||||
{attachments.length > 0 && (
|
||||
<div className="flex gap-2 overflow-x-auto pb-2">
|
||||
{attachments.map((a, i) => (
|
||||
<div key={i} className="relative group">
|
||||
<img src={`data:${a.mimeType};base64,${a.data}`} className="h-16 w-16 object-cover rounded-lg border border-slate-200" alt="preview" />
|
||||
<button
|
||||
onClick={() => setAttachments(prev => prev.filter((_, idx) => idx !== i))}
|
||||
className="absolute -top-1 -right-1 bg-red-500 text-white rounded-full p-0.5 shadow-sm opacity-0 group-hover:opacity-100 transition"
|
||||
>
|
||||
<X size={12} />
|
||||
</button>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
<div className="flex items-end gap-2 bg-slate-50 p-2 rounded-2xl border border-slate-200 focus-within:ring-2 focus-within:ring-blue-100 transition">
|
||||
<input type="file" ref={fileInputRef} onChange={handleFileUpload} accept="image/*" className="hidden" />
|
||||
<button
|
||||
onClick={() => fileInputRef.current?.click()}
|
||||
className="p-2 text-slate-400 hover:text-blue-500 hover:bg-white rounded-xl transition"
|
||||
title={t.uploadImage}
|
||||
>
|
||||
<ImagePlus size={20} />
|
||||
</button>
|
||||
|
||||
<textarea
|
||||
value={input}
|
||||
onChange={e => setInput(e.target.value)}
|
||||
onKeyDown={e => {
|
||||
if (e.key === 'Enter' && !e.shiftKey) {
|
||||
e.preventDefault();
|
||||
handleSendMessage();
|
||||
}
|
||||
}}
|
||||
placeholder={t.inputPlaceholder}
|
||||
className="flex-1 bg-transparent border-none focus:ring-0 resize-none max-h-32 py-2 text-slate-700 placeholder:text-slate-400"
|
||||
rows={1}
|
||||
/>
|
||||
|
||||
<button
|
||||
onClick={handleRecordAudio}
|
||||
className={`p-2 rounded-xl transition ${isRecording ? 'bg-red-100 text-red-500 animate-pulse' : 'text-slate-400 hover:text-blue-500 hover:bg-white'}`}
|
||||
title={t.recordAudio}
|
||||
>
|
||||
<Mic size={20} />
|
||||
</button>
|
||||
|
||||
<button
|
||||
onClick={handleSendMessage}
|
||||
disabled={(!input.trim() && attachments.length === 0) || isProcessing}
|
||||
className="p-2 bg-blue-600 text-white rounded-xl hover:bg-blue-700 disabled:opacity-50 disabled:hover:bg-blue-600 transition shadow-sm"
|
||||
>
|
||||
{isProcessing ? <Loader2 size={20} className="animate-spin"/> : <Send size={20} />}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Tools View */}
|
||||
{activeView === 'tools' && (
|
||||
<div className="h-full overflow-y-auto bg-slate-50/50">
|
||||
<Tools language={settings.language} />
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Settings View */}
|
||||
{activeView === 'settings' && (
|
||||
<div className="h-full overflow-y-auto p-4 md:p-8">
|
||||
<div className="max-w-2xl mx-auto space-y-8">
|
||||
<div className="bg-white p-6 rounded-2xl shadow-sm border border-slate-100">
|
||||
<h2 className="text-lg font-bold mb-4 flex items-center gap-2">
|
||||
<SettingsIcon size={20} className="text-slate-400" />
|
||||
{t.settings}
|
||||
</h2>
|
||||
|
||||
{/* Language */}
|
||||
<div className="mb-6">
|
||||
<label className="block text-sm font-medium text-slate-700 mb-1">Language</label>
|
||||
<select
|
||||
value={settings.language}
|
||||
onChange={(e) => setSettingsState(s => ({...s, language: e.target.value as AppLanguage}))}
|
||||
className="w-full p-3 border border-slate-200 bg-white rounded-xl focus:ring-2 focus:ring-blue-500 focus:outline-none"
|
||||
>
|
||||
<option value={AppLanguage.ZH_CN}>简体中文</option>
|
||||
<option value={AppLanguage.ZH_TW}>繁體中文</option>
|
||||
<option value={AppLanguage.EN}>English</option>
|
||||
<option value={AppLanguage.JA}>日本語</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Data Management */}
|
||||
<div className="bg-white p-6 rounded-2xl shadow-sm border border-slate-100">
|
||||
<h2 className="text-lg font-bold mb-4">{t.backupRestore}</h2>
|
||||
<div className="flex flex-col sm:flex-row gap-4">
|
||||
<button onClick={exportData} className="px-4 py-2 bg-slate-100 hover:bg-slate-200 text-slate-700 rounded-lg text-sm font-medium transition">
|
||||
{t.exportData}
|
||||
</button>
|
||||
<label className="px-4 py-2 bg-slate-100 hover:bg-slate-200 text-slate-700 rounded-lg text-sm font-medium transition cursor-pointer text-center">
|
||||
{t.importData}
|
||||
<input type="file" onChange={handleImport} accept=".json" className="hidden" />
|
||||
</label>
|
||||
<button onClick={() => {
|
||||
if (window.confirm("Are you sure? This will delete all history.")) {
|
||||
clearData();
|
||||
window.location.reload();
|
||||
}
|
||||
}} className="px-4 py-2 bg-red-50 hover:bg-red-100 text-red-600 rounded-lg text-sm font-medium transition ml-auto">
|
||||
{t.clearData}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</main>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default App;
|
||||
39
Dockerfile
Normal file
@@ -0,0 +1,39 @@
# Stage 1: Build
FROM node:20-alpine as builder

WORKDIR /app

# Copy dependency manifests
COPY package.json ./

# Install dependencies
# Note: if the project has a package-lock.json, copy it as well to lock versions
RUN npm install

# Copy project source code
COPY . .

# Accept the API_KEY build argument
# vite.config.ts configures define: { 'process.env.API_KEY': ... },
# so this variable must be present when `npm run build` runs, otherwise the key in the built code will be undefined
ARG API_KEY
ENV API_KEY=$API_KEY

# Run the build
RUN npm run build

# Stage 2: Production Stage
FROM nginx:alpine

# Copy the build output (dist directory) from the build stage into Nginx's default web root
COPY --from=builder /app/dist /usr/share/nginx/html

# Configure Nginx for SPA (single-page application) routing
# This edits the default config: when a file is not found (404), fall back to index.html so the client-side app handles the route
RUN sed -i 's/location \/ {/location \/ { try_files $uri $uri\/ \/index.html;/' /etc/nginx/conf.d/default.conf

# Expose port 80
EXPOSE 80

# Start Nginx
CMD ["nginx", "-g", "daemon off;"]
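As a usage sketch (the image tag `sociopal` and host port 8080 are arbitrary examples, not defined anywhere in the project), the image can be built and run like this:

# Build, passing the Gemini key as the API_KEY build argument declared above:
#   docker build --build-arg API_KEY=your_gemini_api_key -t sociopal .
# Run, mapping the exposed container port 80 to a host port:
#   docker run --rm -p 8080:80 sociopal
# The app is then served by Nginx at http://localhost:8080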
21
README.md
@@ -1,3 +1,20 @@
# ai-app-skg
<div align="center">
<img width="1200" height="475" alt="GHBanner" src="https://github.com/user-attachments/assets/0aa67016-6eaf-458a-adb2-6e31a0763ed6" />
</div>

A sociology learning app built with Google AI Studio: SocioPal - Social Learning Tool

# Run and deploy your AI Studio app

This contains everything you need to run your app locally.

View your app in AI Studio: https://ai.studio/apps/drive/10M3hDCXCBTcz9AYqzRIW22iDVBmND_DH

## Run Locally

**Prerequisites:** Node.js

1. Install dependencies:
   `npm install`
2. Set `API_KEY` in [.env.local](.env.local) to your Gemini API key (this is the variable that `vite.config.ts` injects as `process.env.API_KEY`)
3. Run the app:
   `npm run dev`
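A minimal `.env.local` for local development, assuming the variable name read by `vite.config.ts` (the value below is a placeholder):

# .env.local (read by Vite via loadEnv; keep it out of version control)
API_KEY=your_gemini_api_key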
147
components/Tools.tsx
Normal file
@@ -0,0 +1,147 @@
|
||||
import React, { useState } from 'react';
|
||||
import { TRANSLATIONS } from '../constants';
|
||||
import { AppLanguage } from '../types';
|
||||
import { generateImage, generateVideo } from '../services/geminiService';
|
||||
import { Loader2, Image as ImageIcon, Video, Download } from 'lucide-react';
|
||||
|
||||
interface ToolsProps {
|
||||
language: AppLanguage;
|
||||
}
|
||||
|
||||
const Tools: React.FC<ToolsProps> = ({ language }) => {
|
||||
const t = TRANSLATIONS[language];
|
||||
const [activeTab, setActiveTab] = useState<'image' | 'video'>('image');
|
||||
const [prompt, setPrompt] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [resultUrl, setResultUrl] = useState<string | null>(null);
|
||||
const [imageSize, setImageSize] = useState<"1K" | "2K" | "4K">("1K");
|
||||
const [videoRatio, setVideoRatio] = useState<"16:9" | "9:16">("16:9");
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
const handleGenerate = async () => {
|
||||
if (!prompt.trim()) return;
|
||||
|
||||
setLoading(true);
|
||||
setError(null);
|
||||
setResultUrl(null);
|
||||
|
||||
try {
|
||||
if (activeTab === 'image') {
|
||||
const images = await generateImage(prompt, imageSize);
|
||||
if (images.length > 0) setResultUrl(images[0]);
|
||||
} else {
|
||||
const video = await generateVideo(prompt, videoRatio);
|
||||
setResultUrl(video);
|
||||
}
|
||||
} catch (e: any) {
|
||||
setError(e.message || "Generation failed");
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="max-w-4xl mx-auto p-4 space-y-6">
|
||||
<div className="flex space-x-2 bg-slate-200 p-1 rounded-lg w-fit">
|
||||
<button
|
||||
onClick={() => setActiveTab('image')}
|
||||
className={`flex items-center space-x-2 px-4 py-2 rounded-md transition ${activeTab === 'image' ? 'bg-white shadow text-blue-600' : 'text-slate-600'}`}
|
||||
>
|
||||
<ImageIcon size={18} />
|
||||
<span>{t.imageGen}</span>
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setActiveTab('video')}
|
||||
className={`flex items-center space-x-2 px-4 py-2 rounded-md transition ${activeTab === 'video' ? 'bg-white shadow text-purple-600' : 'text-slate-600'}`}
|
||||
>
|
||||
<Video size={18} />
|
||||
<span>{t.videoGen}</span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div className="bg-white rounded-2xl shadow-sm border border-slate-100 p-6">
|
||||
<textarea
|
||||
className="w-full p-4 border border-slate-200 rounded-xl focus:ring-2 focus:ring-blue-500 focus:outline-none resize-none"
|
||||
rows={4}
|
||||
placeholder={activeTab === 'image' ? t.imagePromptPlaceholder : t.videoPromptPlaceholder}
|
||||
value={prompt}
|
||||
onChange={(e) => setPrompt(e.target.value)}
|
||||
/>
|
||||
|
||||
<div className="mt-4 flex flex-wrap gap-4 items-center justify-between">
|
||||
<div className="flex gap-4">
|
||||
{activeTab === 'image' ? (
|
||||
<div className="flex items-center space-x-2">
|
||||
<span className="text-sm text-slate-500">{t.imageSize}:</span>
|
||||
<select
|
||||
value={imageSize}
|
||||
onChange={(e) => setImageSize(e.target.value as any)}
|
||||
className="p-2 bg-slate-50 border border-slate-200 rounded-lg text-sm"
|
||||
>
|
||||
<option value="1K">1K</option>
|
||||
<option value="2K">2K</option>
|
||||
<option value="4K">4K</option>
|
||||
</select>
|
||||
</div>
|
||||
) : (
|
||||
<div className="flex items-center space-x-2">
|
||||
<span className="text-sm text-slate-500">{t.aspectRatio}:</span>
|
||||
<select
|
||||
value={videoRatio}
|
||||
onChange={(e) => setVideoRatio(e.target.value as any)}
|
||||
className="p-2 bg-slate-50 border border-slate-200 rounded-lg text-sm"
|
||||
>
|
||||
<option value="16:9">{t.landscape}</option>
|
||||
<option value="9:16">{t.portrait}</option>
|
||||
</select>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<button
|
||||
onClick={handleGenerate}
|
||||
disabled={loading || !prompt.trim()}
|
||||
className="px-6 py-2 bg-blue-600 text-white rounded-xl hover:bg-blue-700 disabled:opacity-50 flex items-center space-x-2"
|
||||
>
|
||||
{loading ? <Loader2 className="animate-spin" size={18} /> : null}
|
||||
<span>{loading ? t.generating : t.generate}</span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<div className="mt-4 p-3 bg-red-50 text-red-600 text-sm rounded-lg">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{loading && activeTab === 'video' && (
|
||||
<div className="mt-4 text-center text-sm text-slate-500 animate-pulse">
|
||||
{t.videoDuration}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{resultUrl && (
|
||||
<div className="mt-8 border-t pt-6">
|
||||
<div className="relative rounded-xl overflow-hidden bg-black flex justify-center items-center">
|
||||
{activeTab === 'image' ? (
|
||||
<img src={resultUrl} alt="Generated" className="max-h-[500px] w-auto object-contain" />
|
||||
) : (
|
||||
<video src={resultUrl} controls autoPlay loop className="max-h-[500px] w-auto" />
|
||||
)}
|
||||
<a
|
||||
href={resultUrl}
|
||||
download={`generated-${activeTab}-${Date.now()}.${activeTab === 'image' ? 'png' : 'mp4'}`}
|
||||
className="absolute top-4 right-4 bg-white/90 p-2 rounded-full shadow hover:bg-white text-slate-800"
|
||||
title={t.download}
|
||||
>
|
||||
<Download size={20} />
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Tools;
|
||||
182
constants.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
import { AppLanguage, ChatMode } from './types';
|
||||
|
||||
export const DEFAULT_LANGUAGE = AppLanguage.ZH_CN;
|
||||
|
||||
export const TRANSLATIONS = {
|
||||
[AppLanguage.ZH_CN]: {
|
||||
appName: "社学搭子",
|
||||
tagline: "您的AI社会学助教",
|
||||
newChat: "新建会话",
|
||||
settings: "设置",
|
||||
inputPlaceholder: "输入您的问题...",
|
||||
modeStandard: "标准搜索 (实时联网)",
|
||||
modeDeep: "深度思考 (复杂推理)",
|
||||
modeFast: "极速模式 (快速响应)",
|
||||
tools: "创作工具",
|
||||
imageGen: "图像生成",
|
||||
videoGen: "视频生成",
|
||||
uploadImage: "上传图片分析",
|
||||
recordAudio: "录音提问",
|
||||
generate: "生成",
|
||||
download: "下载",
|
||||
apiKeyLabel: "Google Gemini API Key",
|
||||
apiKeyDesc: "您的密钥将仅存储在本地浏览器中。",
|
||||
backupRestore: "数据备份与恢复",
|
||||
exportData: "导出数据",
|
||||
importData: "导入数据",
|
||||
clearData: "清除所有数据",
|
||||
imageSize: "图片尺寸",
|
||||
aspectRatio: "视频比例",
|
||||
landscape: "横屏 16:9",
|
||||
portrait: "竖屏 9:16",
|
||||
generating: "生成中...",
|
||||
thinking: "正在深度思考...",
|
||||
transcribing: "正在转录音频...",
|
||||
speaking: "朗读",
|
||||
searchSources: "参考来源",
|
||||
errorApiKey: "请先在设置中配置 API Key",
|
||||
welcome: "请选择一个学习场景开始:",
|
||||
videoPromptPlaceholder: "描述您想生成的社会学场景视频...",
|
||||
imagePromptPlaceholder: "描述您想生成的图片...",
|
||||
selectImageSize: "选择尺寸",
|
||||
videoDuration: "生成视频可能需要几分钟,请耐心等待。",
|
||||
scenarios: {
|
||||
general: { title: "日常答疑", desc: "解答各类社会学基础问题", greeting: "你好!我是你的社会学学习搭子。有什么日常学习中的疑问需要我解答吗?" },
|
||||
reading: { title: "经典导读", desc: "马克思、韦伯、涂尔干等经典著作导读", greeting: "欢迎来到经典导读。今天你想通过哪位大家(如韦伯、涂尔干)的著作来深化理解?" },
|
||||
concept: { title: "概念解析", desc: "深入剖析社会学核心概念", greeting: "概念是社会学的基石。请告诉我你需要深度解析哪个概念?(例如:异化、不仅、科层制)" },
|
||||
research: { title: "研究讨论", desc: "研究设计、方法论与田野调查建议", greeting: "你好,研究员。无论是定性还是定量,我都可以协助你完善研究设计或讨论方法论问题。" }
|
||||
}
|
||||
},
|
||||
[AppLanguage.ZH_TW]: {
|
||||
appName: "社學搭子",
|
||||
tagline: "您的AI社會學助教",
|
||||
newChat: "新建會話",
|
||||
settings: "設置",
|
||||
inputPlaceholder: "輸入您的問題...",
|
||||
modeStandard: "標準搜索 (實時聯網)",
|
||||
modeDeep: "深度思考 (複雜推理)",
|
||||
modeFast: "極速模式 (快速響應)",
|
||||
tools: "創作工具",
|
||||
imageGen: "圖像生成",
|
||||
videoGen: "視頻生成",
|
||||
uploadImage: "上傳圖片分析",
|
||||
recordAudio: "錄音提問",
|
||||
generate: "生成",
|
||||
download: "下載",
|
||||
apiKeyLabel: "Google Gemini API Key",
|
||||
apiKeyDesc: "您的密鑰將僅存儲在本地瀏覽器中。",
|
||||
backupRestore: "數據備份與恢復",
|
||||
exportData: "導出數據",
|
||||
importData: "導入數據",
|
||||
clearData: "清除所有數據",
|
||||
imageSize: "圖片尺寸",
|
||||
aspectRatio: "視頻比例",
|
||||
landscape: "橫屏 16:9",
|
||||
portrait: "豎屏 9:16",
|
||||
generating: "生成中...",
|
||||
thinking: "正在深度思考...",
|
||||
transcribing: "正在轉錄音頻...",
|
||||
speaking: "朗讀",
|
||||
searchSources: "參考來源",
|
||||
errorApiKey: "請先在設置中配置 API Key",
|
||||
welcome: "請選擇一個學習場景開始:",
|
||||
videoPromptPlaceholder: "描述您想生成的社會學場景視頻...",
|
||||
imagePromptPlaceholder: "描述您想生成的圖片...",
|
||||
selectImageSize: "選擇尺寸",
|
||||
videoDuration: "生成視頻可能需要幾分鐘,請耐心等待。",
|
||||
scenarios: {
|
||||
general: { title: "日常答疑", desc: "解答各類社會學基礎問題", greeting: "你好!我是你的社會學學習搭子。有什麼日常學習中的疑問需要我解答嗎?" },
|
||||
reading: { title: "經典導讀", desc: "馬克思、韋伯、塗爾干等經典著作導讀", greeting: "歡迎來到經典導讀。今天你想通過哪位大家(如韋伯、塗爾干)的著作來深化理解?" },
|
||||
concept: { title: "概念解析", desc: "深入剖析社會學核心概念", greeting: "概念是社會學的基石。請告訴我你需要深度解析哪個概念?(例如:異化、不僅、科層制)" },
|
||||
research: { title: "研究討論", desc: "研究設計、方法論與田野調查建議", greeting: "你好,研究員。無論是定性還是定量,我都可以協助你完善研究設計或討論方法論問題。" }
|
||||
}
|
||||
},
|
||||
[AppLanguage.EN]: {
|
||||
appName: "SocioPal",
|
||||
tagline: "Your AI Sociology Tutor",
|
||||
newChat: "New Chat",
|
||||
settings: "Settings",
|
||||
inputPlaceholder: "Ask a question...",
|
||||
modeStandard: "Standard (Search)",
|
||||
modeDeep: "Deep Think (Reasoning)",
|
||||
modeFast: "Fast (Lite)",
|
||||
tools: "Creative Tools",
|
||||
imageGen: "Image Gen",
|
||||
videoGen: "Video Gen",
|
||||
uploadImage: "Analyze Image",
|
||||
recordAudio: "Record Audio",
|
||||
generate: "Generate",
|
||||
download: "Download",
|
||||
apiKeyLabel: "Google Gemini API Key",
|
||||
apiKeyDesc: "Your key is stored locally in your browser.",
|
||||
backupRestore: "Backup & Restore",
|
||||
exportData: "Export Data",
|
||||
importData: "Import Data",
|
||||
clearData: "Clear All Data",
|
||||
imageSize: "Image Size",
|
||||
aspectRatio: "Aspect Ratio",
|
||||
landscape: "Landscape 16:9",
|
||||
portrait: "Portrait 9:16",
|
||||
generating: "Generating...",
|
||||
thinking: "Thinking deeply...",
|
||||
transcribing: "Transcribing audio...",
|
||||
speaking: "Read Aloud",
|
||||
searchSources: "Sources",
|
||||
errorApiKey: "Please configure your API Key in Settings first.",
|
||||
welcome: "Choose a learning scenario to start:",
|
||||
videoPromptPlaceholder: "Describe the sociology scenario video...",
|
||||
imagePromptPlaceholder: "Describe the image to generate...",
|
||||
selectImageSize: "Select Size",
|
||||
videoDuration: "Video generation may take a few minutes.",
|
||||
scenarios: {
|
||||
general: { title: "Daily Q&A", desc: "General sociology questions", greeting: "Hi! I'm your sociology study companion. Do you have any questions for me today?" },
|
||||
reading: { title: "Classic Readings", desc: "Guide to Marx, Weber, Durkheim...", greeting: "Welcome to Classic Readings. Which foundational text or theorist shall we explore today?" },
|
||||
concept: { title: "Concept Analysis", desc: "Deep dive into terms", greeting: "Concepts are the building blocks of sociology. Which term would you like to analyze deeply?" },
|
||||
research: { title: "Research Advisor", desc: "Methodology and design", greeting: "Hello, researcher. I can assist with your research design, methodology, or field work questions." }
|
||||
}
|
||||
},
|
||||
[AppLanguage.JA]: {
|
||||
appName: "SocioPal",
|
||||
tagline: "AI社会学チューター",
|
||||
newChat: "新しいチャット",
|
||||
settings: "設定",
|
||||
inputPlaceholder: "質問を入力...",
|
||||
modeStandard: "標準 (検索)",
|
||||
modeDeep: "深い思考 (推論)",
|
||||
modeFast: "高速 (ライト)",
|
||||
tools: "クリエイティブツール",
|
||||
imageGen: "画像生成",
|
||||
videoGen: "動画生成",
|
||||
uploadImage: "画像分析",
|
||||
recordAudio: "音声入力",
|
||||
generate: "生成",
|
||||
download: "ダウンロード",
|
||||
apiKeyLabel: "Google Gemini API Key",
|
||||
apiKeyDesc: "キーはブラウザにローカルに保存されます。",
|
||||
backupRestore: "バックアップと復元",
|
||||
exportData: "データをエクスポート",
|
||||
importData: "データをインポート",
|
||||
clearData: "すべてのデータを消去",
|
||||
imageSize: "画像サイズ",
|
||||
aspectRatio: "アスペクト比",
|
||||
landscape: "横向き 16:9",
|
||||
portrait: "縦向き 9:16",
|
||||
generating: "生成中...",
|
||||
thinking: "深く考えています...",
|
||||
transcribing: "音声を文字起こし中...",
|
||||
speaking: "読み上げ",
|
||||
searchSources: "情報源",
|
||||
errorApiKey: "設定でAPIキーを設定してください。",
|
||||
welcome: "学習シナリオを選択してください:",
|
||||
videoPromptPlaceholder: "生成したい社会学のシナリオ動画を説明してください...",
|
||||
imagePromptPlaceholder: "生成したい画像を説明してください...",
|
||||
selectImageSize: "サイズを選択",
|
||||
videoDuration: "動画の生成には数分かかる場合があります。",
|
||||
scenarios: {
|
||||
general: { title: "日常のQ&A", desc: "一般的な社会学の質問", greeting: "こんにちは!社会学の学習パートナーです。今日の質問は何ですか?" },
|
||||
reading: { title: "古典講読", desc: "マルクス、ウェーバー、デュルケーム...", greeting: "古典講読へようこそ。今日はどの社会学者の著作を深掘りしましょうか?" },
|
||||
concept: { title: "概念分析", desc: "用語の深い分析", greeting: "概念は社会学の基礎です。どの用語を詳しく分析したいですか?" },
|
||||
research: { title: "研究相談", desc: "方法論とデザイン", greeting: "こんにちは。研究デザインや方法論(質的・量的)についての相談に乗ります。" }
|
||||
}
|
||||
}
|
||||
};
|
||||
36
index.html
Normal file
@@ -0,0 +1,36 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>SocioPal - Social Learning Tool</title>
  <script src="https://cdn.tailwindcss.com"></script>
  <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap" rel="stylesheet">
  <style>
    body { font-family: 'Inter', sans-serif; }
    /* Custom scrollbar for webkit */
    ::-webkit-scrollbar { width: 6px; height: 6px; }
    ::-webkit-scrollbar-track { background: transparent; }
    ::-webkit-scrollbar-thumb { background: #cbd5e1; border-radius: 3px; }
    ::-webkit-scrollbar-thumb:hover { background: #94a3b8; }
  </style>
  <script type="importmap">
  {
    "imports": {
      "react/": "https://esm.sh/react@^19.2.3/",
      "react": "https://esm.sh/react@^19.2.3",
      "react-dom/": "https://esm.sh/react-dom@^19.2.3/",
      "@google/genai": "https://esm.sh/@google/genai@^1.34.0",
      "lucide-react": "https://esm.sh/lucide-react@^0.562.0",
      "react-markdown": "https://esm.sh/react-markdown@^10.1.0",
      "vite": "https://esm.sh/vite@^7.3.0",
      "@vitejs/plugin-react": "https://esm.sh/@vitejs/plugin-react@^5.1.2"
    }
  }
  </script>
</head>
<body class="bg-slate-50 text-slate-900">
  <div id="root"></div>
  <script type="module" src="/index.tsx"></script>
</body>
</html>
15
index.tsx
Normal file
@@ -0,0 +1,15 @@
import React from 'react';
import ReactDOM from 'react-dom/client';
import App from './App';

const rootElement = document.getElementById('root');
if (!rootElement) {
  throw new Error("Could not find root element to mount to");
}

const root = ReactDOM.createRoot(rootElement);
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);
7
metadata.json
Normal file
@@ -0,0 +1,7 @@
{
  "name": "SocioPal - Social Learning Tool",
  "description": "A comprehensive AI-powered tool for sociology learning, featuring Q&A, image generation, video simulation, and audio transcription.",
  "requestFramePermissions": [
    "microphone"
  ]
}
25
package.json
Normal file
@@ -0,0 +1,25 @@
{
  "name": "sociopal",
  "private": true,
  "version": "1.0.0",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "tsc && vite build",
    "preview": "vite preview"
  },
  "dependencies": {
    "@google/genai": "^1.34.0",
    "lucide-react": "^0.300.0",
    "react": "^18.3.1",
    "react-dom": "^18.3.1",
    "react-markdown": "^9.0.0"
  },
  "devDependencies": {
    "@types/react": "^18.3.1",
    "@types/react-dom": "^18.3.0",
    "@vitejs/plugin-react": "^4.2.0",
    "typescript": "^5.3.0",
    "vite": "^5.0.0"
  }
}
262
services/geminiService.ts
Normal file
@@ -0,0 +1,262 @@
|
||||
import { GoogleGenAI, Modality, Type } from "@google/genai";
|
||||
import { ChatMode, Message, ChatScenario } from "../types";
|
||||
|
||||
// Helper to get client
|
||||
const getClient = () => {
|
||||
const apiKey = process.env.API_KEY;
|
||||
if (!apiKey) throw new Error("API Key is missing. Please ensure process.env.API_KEY is set.");
|
||||
return new GoogleGenAI({ apiKey });
|
||||
};
|
||||
|
||||
// --- Models ---
|
||||
const MODEL_CHAT_STANDARD = "gemini-3-flash-preview";
|
||||
const MODEL_CHAT_DEEP = "gemini-3-pro-preview";
|
||||
const MODEL_CHAT_FAST = "gemini-flash-lite-latest";
|
||||
const MODEL_IMAGE_GEN = "gemini-3-pro-image-preview";
|
||||
const MODEL_VIDEO_GEN = "veo-3.1-fast-generate-preview";
|
||||
const MODEL_TTS = "gemini-2.5-flash-preview-tts";
|
||||
|
||||
// --- Chat ---
|
||||
|
||||
export const streamChatResponse = async (
|
||||
history: Message[],
|
||||
currentMessage: string,
|
||||
mode: ChatMode,
|
||||
language: string,
|
||||
scenario: ChatScenario = ChatScenario.GENERAL,
|
||||
attachments: { mimeType: string; data: string }[] = [],
|
||||
onChunk: (text: string, grounding?: any) => void
|
||||
) => {
|
||||
const ai = getClient();
|
||||
let model = MODEL_CHAT_STANDARD;
|
||||
|
||||
// Construct System Instruction based on Scenario
|
||||
let baseInstruction = "";
|
||||
switch (scenario) {
|
||||
case ChatScenario.READING:
|
||||
baseInstruction = `You are a distinguished Sociology Professor specializing in Classical Sociological Theory.
|
||||
Focus on the works of Marx, Weber, Durkheim, Simmel, and other foundational figures.
|
||||
When answering:
|
||||
1. Contextualize the text historically.
|
||||
2. Explain key arguments precisely.
|
||||
3. Discuss the critical reception and legacy.
|
||||
4. Use academic yet accessible language.`;
|
||||
break;
|
||||
case ChatScenario.CONCEPT:
|
||||
baseInstruction = `You are an expert Sociological Concept Analyst.
|
||||
Your goal is to provide deep, multi-dimensional definitions of sociological terms.
|
||||
When defining a concept:
|
||||
1. Provide a clear, concise definition.
|
||||
2. Explain its etymology or theoretical origin.
|
||||
3. Contrast it with related or opposing concepts.
|
||||
4. Provide concrete examples of the concept in action.`;
|
||||
break;
|
||||
case ChatScenario.RESEARCH:
|
||||
baseInstruction = `You are a Senior Research Methodology Consultant.
|
||||
You help students and researchers design their studies.
|
||||
Focus on:
|
||||
1. Refining research questions.
|
||||
2. Suggesting appropriate methods (Qualitative, Quantitative, Mixed).
|
||||
3. Discussing sampling, operationalization, and ethics.
|
||||
4. Suggesting theoretical frameworks suitable for the topic.`;
|
||||
break;
|
||||
case ChatScenario.GENERAL:
|
||||
default:
|
||||
baseInstruction = `You are a helpful and knowledgeable Sociology Learning Assistant.
|
||||
Answer questions clearly using sociological perspectives.
|
||||
Encourage critical thinking and connect daily life examples to sociological theories.`;
|
||||
break;
|
||||
}
|
||||
|
||||
let config: any = {
|
||||
systemInstruction: `${baseInstruction} Always reply in the user's preferred language: ${language}.`,
|
||||
};
|
||||
|
||||
// Configure based on mode
|
||||
if (mode === ChatMode.STANDARD) {
|
||||
model = MODEL_CHAT_STANDARD;
|
||||
config.tools = [{ googleSearch: {} }];
|
||||
} else if (mode === ChatMode.DEEP) {
|
||||
model = MODEL_CHAT_DEEP;
|
||||
config.thinkingConfig = { thinkingBudget: 32768 }; // Max for pro
|
||||
} else if (mode === ChatMode.FAST) {
|
||||
model = MODEL_CHAT_FAST;
|
||||
}
|
||||
|
||||
const chat = ai.chats.create({
|
||||
model,
|
||||
config,
|
||||
history: history.slice(0, -1).map(m => ({
|
||||
role: m.role,
|
||||
parts: [
|
||||
{ text: m.content },
|
||||
...(m.attachments || []).map(a => ({
|
||||
inlineData: { mimeType: a.mimeType, data: a.data }
|
||||
}))
|
||||
]
|
||||
}))
|
||||
});
|
||||
|
||||
const parts: any[] = [{ text: currentMessage }];
|
||||
attachments.forEach(att => {
|
||||
parts.push({ inlineData: { mimeType: att.mimeType, data: att.data } });
|
||||
});
|
||||
|
||||
try {
|
||||
const result = await chat.sendMessageStream({
|
||||
message: { parts }
|
||||
});
|
||||
|
||||
for await (const chunk of result) {
|
||||
const text = chunk.text;
|
||||
const grounding = chunk.candidates?.[0]?.groundingMetadata;
|
||||
if (text || grounding) {
|
||||
onChunk(text || '', grounding);
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
console.error("Chat error", e);
|
||||
throw e;
|
||||
}
|
||||
};
|
||||
|
||||
// --- Image Generation ---
|
||||
export const generateImage = async (
|
||||
prompt: string,
|
||||
size: "1K" | "2K" | "4K"
|
||||
): Promise<string[]> => {
|
||||
const ai = getClient();
|
||||
|
||||
// Using gemini-3-pro-image-preview
|
||||
const response = await ai.models.generateContent({
|
||||
model: MODEL_IMAGE_GEN,
|
||||
contents: { parts: [{ text: prompt }] },
|
||||
config: {
|
||||
imageConfig: {
|
||||
imageSize: size,
|
||||
count: 1, // Only 1 allowed usually for this model in preview
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const images: string[] = [];
|
||||
if (response.candidates?.[0]?.content?.parts) {
|
||||
for (const part of response.candidates[0].content.parts) {
|
||||
if (part.inlineData && part.inlineData.data) {
|
||||
images.push(`data:${part.inlineData.mimeType};base64,${part.inlineData.data}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
return images;
|
||||
};
|
||||
|
||||
// --- Video Generation ---
|
||||
export const generateVideo = async (
|
||||
prompt: string,
|
||||
aspectRatio: "16:9" | "9:16"
|
||||
): Promise<string> => {
|
||||
const ai = getClient();
|
||||
|
||||
let operation = await ai.models.generateVideos({
|
||||
model: MODEL_VIDEO_GEN,
|
||||
prompt: prompt,
|
||||
config: {
|
||||
numberOfVideos: 1,
|
||||
aspectRatio: aspectRatio,
|
||||
resolution: '720p', // fast-generate-preview often defaults to this
|
||||
}
|
||||
});
|
||||
|
||||
// Poll for completion
|
||||
while (!operation.done) {
|
||||
await new Promise(resolve => setTimeout(resolve, 5000));
|
||||
operation = await ai.operations.getVideosOperation({ operation: operation });
|
||||
}
|
||||
|
||||
const uri = operation.response?.generatedVideos?.[0]?.video?.uri;
|
||||
if (!uri) throw new Error("No video URI returned");
|
||||
|
||||
// Fetch the actual bytes using the key
|
||||
const fetchResponse = await fetch(`${uri}&key=${process.env.API_KEY}`);
|
||||
const blob = await fetchResponse.blob();
|
||||
return URL.createObjectURL(blob);
|
||||
};
|
||||
|
||||
// --- Transcription ---
|
||||
export const transcribeAudio = async (
|
||||
audioBase64: string,
|
||||
mimeType: string
|
||||
): Promise<string> => {
|
||||
const ai = getClient();
|
||||
const response = await ai.models.generateContent({
|
||||
model: MODEL_CHAT_STANDARD, // 3-flash is good for audio
|
||||
contents: {
|
||||
parts: [
|
||||
{ inlineData: { mimeType, data: audioBase64 } },
|
||||
{ text: "Please transcribe this audio exactly as spoken." }
|
||||
]
|
||||
}
|
||||
});
|
||||
return response.text || "";
|
||||
};
|
||||
|
||||
// --- TTS ---
|
||||
export const generateSpeech = async (
|
||||
text: string
|
||||
): Promise<AudioBuffer> => {
|
||||
const ai = getClient();
|
||||
const response = await ai.models.generateContent({
|
||||
model: MODEL_TTS,
|
||||
contents: { parts: [{ text }] },
|
||||
config: {
|
||||
responseModalities: [Modality.AUDIO],
|
||||
speechConfig: {
|
||||
voiceConfig: {
|
||||
prebuiltVoiceConfig: { voiceName: 'Kore' },
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const base64Audio = response.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
|
||||
if (!base64Audio) throw new Error("No audio generated");
|
||||
|
||||
const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
|
||||
const audioBuffer = await decodeAudioData(
|
||||
decode(base64Audio),
|
||||
audioContext,
|
||||
24000,
|
||||
1
|
||||
);
|
||||
return audioBuffer;
|
||||
};
|
||||
|
||||
// Helper utils for audio
|
||||
function decode(base64: string) {
|
||||
const binaryString = atob(base64);
|
||||
const len = binaryString.length;
|
||||
const bytes = new Uint8Array(len);
|
||||
for (let i = 0; i < len; i++) {
|
||||
bytes[i] = binaryString.charCodeAt(i);
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
async function decodeAudioData(
|
||||
data: Uint8Array,
|
||||
ctx: AudioContext,
|
||||
sampleRate: number,
|
||||
numChannels: number,
|
||||
): Promise<AudioBuffer> {
|
||||
const dataInt16 = new Int16Array(data.buffer);
|
||||
const frameCount = dataInt16.length / numChannels;
|
||||
const buffer = ctx.createBuffer(numChannels, frameCount, sampleRate);
|
||||
|
||||
for (let channel = 0; channel < numChannels; channel++) {
|
||||
const channelData = buffer.getChannelData(channel);
|
||||
for (let i = 0; i < frameCount; i++) {
|
||||
channelData[i] = dataInt16[i * numChannels + channel] / 32768.0;
|
||||
}
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
68
services/storage.ts
Normal file
@@ -0,0 +1,68 @@
import { UserSettings, ChatSession, AppLanguage } from "../types";
import { DEFAULT_LANGUAGE } from "../constants";

const KEYS = {
  SETTINGS: "sociopal_settings",
  SESSIONS: "sociopal_sessions",
  CURRENT_SESSION: "sociopal_current_session_id"
};

export const loadSettings = (): UserSettings => {
  const stored = localStorage.getItem(KEYS.SETTINGS);
  if (stored) return JSON.parse(stored);
  return {
    language: DEFAULT_LANGUAGE,
    theme: 'light'
  };
};

export const saveSettings = (settings: UserSettings) => {
  localStorage.setItem(KEYS.SETTINGS, JSON.stringify(settings));
};

export const loadSessions = (): ChatSession[] => {
  const stored = localStorage.getItem(KEYS.SESSIONS);
  return stored ? JSON.parse(stored) : [];
};

export const saveSessions = (sessions: ChatSession[]) => {
  localStorage.setItem(KEYS.SESSIONS, JSON.stringify(sessions));
};

export const exportData = () => {
  const data = {
    settings: loadSettings(),
    sessions: loadSessions(),
    timestamp: Date.now()
  };
  const blob = new Blob([JSON.stringify(data, null, 2)], { type: "application/json" });
  const url = URL.createObjectURL(blob);
  const a = document.createElement('a');
  a.href = url;
  a.download = `sociopal_backup_${new Date().toISOString().slice(0, 10)}.json`;
  a.click();
};

export const importData = async (file: File): Promise<boolean> => {
  return new Promise((resolve) => {
    const reader = new FileReader();
    reader.onload = (e) => {
      try {
        const data = JSON.parse(e.target?.result as string);
        if (data.settings) saveSettings(data.settings);
        if (data.sessions) saveSessions(data.sessions);
        resolve(true);
      } catch (err) {
        console.error("Import failed", err);
        resolve(false);
      }
    };
    reader.readAsText(file);
  });
};

export const clearData = () => {
  localStorage.removeItem(KEYS.SETTINGS);
  localStorage.removeItem(KEYS.SESSIONS);
  localStorage.removeItem(KEYS.CURRENT_SESSION);
};
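For reference, a minimal sketch of the backup file that `exportData` writes and `importData` accepts (values are illustrative; field names follow `UserSettings` and `ChatSession` in types.ts):

{
  "settings": { "language": "zh-CN", "theme": "light" },
  "sessions": [
    {
      "id": "1700000000000",
      "title": "日常答疑",
      "messages": [],
      "mode": "standard",
      "scenario": "general",
      "createdAt": 1700000000000
    }
  ],
  "timestamp": 1700000000000
}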
21
tsconfig.json
Normal file
@@ -0,0 +1,21 @@
{
  "compilerOptions": {
    "target": "ES2020",
    "useDefineForClassFields": true,
    "lib": ["ES2020", "DOM", "DOM.Iterable"],
    "module": "ESNext",
    "skipLibCheck": true,
    "moduleResolution": "bundler",
    "allowImportingTsExtensions": true,
    "resolveJsonModule": true,
    "isolatedModules": true,
    "noEmit": true,
    "jsx": "react-jsx",
    "strict": true,
    "noUnusedLocals": false,
    "noUnusedParameters": false,
    "noFallthroughCasesInSwitch": true
  },
  "include": ["."],
  "references": [{ "path": "./tsconfig.node.json" }]
}
10
tsconfig.node.json
Normal file
@@ -0,0 +1,10 @@
{
  "compilerOptions": {
    "composite": true,
    "skipLibCheck": true,
    "module": "ESNext",
    "moduleResolution": "bundler",
    "allowSyntheticDefaultImports": true
  },
  "include": ["vite.config.ts"]
}
57
types.ts
Normal file
@@ -0,0 +1,57 @@
export enum AppLanguage {
  EN = 'en',
  ZH_CN = 'zh-CN',
  ZH_TW = 'zh-TW',
  JA = 'ja'
}

export enum ChatMode {
  STANDARD = 'standard', // Flash + Search
  DEEP = 'deep',         // Pro + Thinking
  FAST = 'fast'          // Flash Lite
}

export enum ChatScenario {
  GENERAL = 'general',
  READING = 'reading',
  CONCEPT = 'concept',
  RESEARCH = 'research'
}

export interface Message {
  id: string;
  role: 'user' | 'model';
  content: string;
  timestamp: number;
  attachments?: Attachment[];
  isThinking?: boolean; // If true, show thinking UI
  groundingMetadata?: GroundingMetadata;
}

export interface GroundingMetadata {
  searchEntryPoint?: { renderedContent: string };
  groundingChunks?: Array<{
    web?: { uri: string; title: string };
  }>;
}

export interface Attachment {
  type: 'image' | 'audio' | 'video';
  mimeType: string;
  data: string; // Base64
  name?: string;
}

export interface UserSettings {
  language: AppLanguage;
  theme: 'light' | 'dark';
}

export interface ChatSession {
  id: string;
  title: string;
  messages: Message[];
  mode: ChatMode;
  scenario?: ChatScenario;
  createdAt: number;
}
14
vite.config.ts
Normal file
@@ -0,0 +1,14 @@
import { defineConfig, loadEnv } from 'vite'
import react from '@vitejs/plugin-react'

// https://vitejs.dev/config/
export default defineConfig(({ mode }) => {
  const env = loadEnv(mode, (process as any).cwd(), '');
  return {
    plugins: [react()],
    define: {
      // Define process.env.API_KEY during build time to support the existing code structure
      'process.env.API_KEY': JSON.stringify(env.API_KEY)
    }
  }
})
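A minimal sketch of what this `define` entry means for the client bundle (illustrative key value only): Vite replaces the expression textually at build time, so the key ends up as a string literal in the shipped JavaScript rather than being read at runtime.

// Source (services/geminiService.ts reads the key like this):
const apiKey = process.env.API_KEY;

// After `vite build` with API_KEY=abc123 available, the emitted bundle
// effectively contains:
//   const apiKey = "abc123";
// i.e. the key is inlined client-side and visible to anyone inspecting the build.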