diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..8bedad9
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,10 @@
+node_modules
+dist
+.git
+.gitignore
+.env
+.DS_Store
+*.log
+coverage
+.vscode
+README.md
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/App.tsx b/App.tsx
new file mode 100644
index 0000000..369059c
--- /dev/null
+++ b/App.tsx
@@ -0,0 +1,685 @@
+import React, { useState, useEffect, useRef } from 'react';
+import {
+  Settings as SettingsIcon,
+  MessageSquare,
+  Sparkles,
+  Menu,
+  X,
+  Mic,
+  ImagePlus,
+  Send,
+  Loader2,
+  Volume2,
+  Trash2,
+  Plus,
+  BookOpen,
+  Brain,
+  GraduationCap,
+  Coffee
+} from 'lucide-react';
+import ReactMarkdown from 'react-markdown';
+import { TRANSLATIONS, DEFAULT_LANGUAGE } from './constants';
+import { AppLanguage, ChatMode, Message, UserSettings, ChatSession, ChatScenario } from './types';
+import { loadSettings, saveSettings, loadSessions, saveSessions, exportData, importData, clearData } from './services/storage';
+import { streamChatResponse, transcribeAudio, generateSpeech } from './services/geminiService';
+import Tools from './components/Tools';
+
+const App: React.FC = () => {
+  // State
+  const [settings, setSettingsState] = useState<UserSettings>(loadSettings());
+  const [sessions, setSessions] = useState<ChatSession[]>([]);
+  const [currentSessionId, setCurrentSessionId] = useState<string | null>(null);
+  const [input, setInput] = useState('');
+  const [isSidebarOpen, setIsSidebarOpen] = useState(false);
+  const [activeView, setActiveView] = useState<'chat' | 'tools' | 'settings'>('chat');
+  const [isProcessing, setIsProcessing] = useState(false);
+  const [streamingContent, setStreamingContent] = useState(''); // For real-time effect
+  const [attachments, setAttachments] = useState<{mimeType: string, data: string, name?: string}[]>([]);
+  const [isRecording, setIsRecording] = useState(false);
+
+  // Refs
+  const messagesEndRef = useRef<HTMLDivElement | null>(null);
+  const fileInputRef = useRef<HTMLInputElement | null>(null);
+  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
+
+  const t = TRANSLATIONS[settings.language] || TRANSLATIONS[DEFAULT_LANGUAGE];
+
+  // Effects
+  useEffect(() => {
+    const loadedSessions = loadSessions();
+    setSessions(loadedSessions);
+    // Select the most recent session on load if none is active yet
+    if (loadedSessions.length > 0 && !currentSessionId) {
+      setCurrentSessionId(loadedSessions[0].id);
+    }
+  }, []);
+
+  useEffect(() => {
+    saveSettings(settings);
+  }, [settings]);
+
+  useEffect(() => {
+    saveSessions(sessions);
+  }, [sessions]);
+
+  useEffect(() => {
+    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
+  }, [sessions, streamingContent, currentSessionId]);
+
+  // Helpers
+  const getCurrentSession = () => sessions.find(s => s.id === currentSessionId);
+
+  const handleNewChatClick = () => {
+    setCurrentSessionId(null);
+    setActiveView('chat');
+    setIsSidebarOpen(false);
+  };
+
+  const startScenarioSession = (scenario: ChatScenario) => {
+    const scenarioConfig = t.scenarios[scenario];
+    const initialGreeting = scenarioConfig.greeting;
+
+    const newSession: ChatSession = {
+      id: Date.now().toString(),
+      title: scenarioConfig.title,
+      messages: [
+        {
+          id: Date.now().toString(),
+          role: 'model',
+          content: initialGreeting,
+          timestamp: Date.now()
+        }
+      ],
+      mode: ChatMode.STANDARD, // Default mode, can be changed
+      scenario: scenario,
+      createdAt: Date.now()
+    };
+
+    setSessions([newSession, ...sessions]);
+    setCurrentSessionId(newSession.id);
+  };
+
+  const updateCurrentSession = (updater: (session: ChatSession) => ChatSession) => {
+    setSessions(prev => prev.map(s => s.id === currentSessionId ? updater(s) : s));
+  };
+
+  const getScenarioIcon = (scenario?: ChatScenario) => {
+    // NOTE: the original icon elements were lost in extraction; the pairing
+    // below is an assumption based on the lucide-react import list.
+    switch (scenario) {
+      case ChatScenario.READING: return <BookOpen />;
+      case ChatScenario.CONCEPT: return <Brain />;
+      case ChatScenario.RESEARCH: return <GraduationCap />;
+      case ChatScenario.GENERAL:
+      default: return <Coffee />;
+    }
+  };
+
+  // Handlers
+  const handleSendMessage = async () => {
+    if ((!input.trim() && attachments.length === 0) || isProcessing || !currentSessionId) return;
+
+    const session = getCurrentSession();
+    if (!session) return;
+
+    const userMsg: Message = {
+      id: Date.now().toString(),
+      role: 'user',
+      content: input,
+      timestamp: Date.now(),
+      attachments: attachments.map(a => ({ type: 'image', ...a })) // Simplify type for now
+    };
+
+    updateCurrentSession(s => ({ ...s, messages: [...s.messages, userMsg] }));
+    setInput('');
+    setAttachments([]);
+    setIsProcessing(true);
+    setStreamingContent('');
+
+    try {
+      let fullResponse = '';
+      let groundingData: any = null;
+
+      await streamChatResponse(
+        [...session.messages, userMsg],
+        userMsg.content,
+        session.mode,
+        settings.language,
+        session.scenario || ChatScenario.GENERAL,
+        userMsg.attachments as any,
+        (text, grounding) => {
+          fullResponse += text;
+          setStreamingContent(fullResponse);
+          if (grounding) groundingData = grounding;
+        }
+      );
+
+      const modelMsg: Message = {
+        id: (Date.now() + 1).toString(),
+        role: 'model',
+        content: fullResponse,
+        timestamp: Date.now(),
+        groundingMetadata: groundingData
+      };
+
+      updateCurrentSession(s => ({ ...s, messages: [...s.messages, modelMsg] }));
+
+      // Auto-update the title on the first user interaction (the session holds
+      // only the greeting at this point, so messages.length === 1)
+      if (session.messages.length === 1) {
+        const newTitle = userMsg.content.slice(0, 30) || t.newChat;
+        updateCurrentSession(s => ({ ...s, title: newTitle }));
+      }
+
+    } catch (err) {
+      console.error(err);
+      const errorMsg: Message = {
+        id: Date.now().toString(),
+        role: 'model',
+        content: "Error: Could not generate response. Please check that the API key is configured in the environment.",
+        timestamp: Date.now()
+      };
+      updateCurrentSession(s => ({ ...s, messages: [...s.messages, errorMsg] }));
+    } finally {
+      setIsProcessing(false);
+      setStreamingContent('');
+    }
+  };
+
+  const handleFileUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
+    const file = e.target.files?.[0];
+    if (!file) return;
+
+    const reader = new FileReader();
+    reader.onloadend = () => {
+      const base64String = reader.result as string;
+      const base64Data = base64String.split(',')[1]; // strip the "data:<mime>;base64," prefix
+      setAttachments(prev => [...prev, {
+        mimeType: file.type,
+        data: base64Data,
+        name: file.name
+      }]);
+    };
+    reader.readAsDataURL(file);
+    e.target.value = ''; // Reset input so the same file can be selected again
+  };
+
+  const handleRecordAudio = async () => {
+    if (isRecording) {
+      mediaRecorderRef.current?.stop();
+      setIsRecording(false);
+      return;
+    }
+
+    try {
+      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+      const mediaRecorder = new MediaRecorder(stream);
+      mediaRecorderRef.current = mediaRecorder;
+      const chunks: BlobPart[] = [];
+
+      mediaRecorder.ondataavailable = (e) => chunks.push(e.data);
+      mediaRecorder.onstop = async () => {
+        const blob = new Blob(chunks, { type: 'audio/webm' }); // Chrome default
+        // Convert blob to base64
+        const reader = new FileReader();
+        reader.onloadend = async () => {
+          const base64 = (reader.result as string).split(',')[1];
+
+          setIsProcessing(true); // Re-use processing state for spinner on button
+          try {
+            const text = await transcribeAudio(base64, 'audio/webm');
+            setInput(prev => prev + " " + text);
+          } catch (e) {
+            console.error(e);
+            alert("Transcription failed");
+          } finally {
+            setIsProcessing(false);
+          }
+        };
+        reader.readAsDataURL(blob);
+        stream.getTracks().forEach(track => track.stop());
+      };
+
+      mediaRecorder.start();
+      setIsRecording(true);
+    } catch (e) {
+      console.error("Mic error", e);
+      alert("Microphone access denied or not available.");
+    }
+  };
+
+  const playTTS = async (text: string) => {
+    try {
+      const buffer = await generateSpeech(text);
+      const ctx = new (window.AudioContext || (window as any).webkitAudioContext)();
+      const source = ctx.createBufferSource();
+      source.buffer = buffer;
+      source.connect(ctx.destination);
+      source.start(0);
+    } catch (e) {
+      console.error("TTS Error", e);
+    }
+  };
+
+  const handleImport = async (e: React.ChangeEvent<HTMLInputElement>) => {
+    const file = e.target.files?.[0];
+    if (file) {
+      const success = await importData(file);
+      if (success) {
+        setSettingsState(loadSettings());
+        setSessions(loadSessions());
+        alert("Import successful!");
+      } else {
+        alert("Import failed.");
+      }
+    }
+  };
+
+  const currentSession = getCurrentSession();
+
+  return (
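+    // NOTE: the JSX below lost most of its tags and all className/style props
+    // during extraction. Tags are restored minimally from the surviving
+    // expressions; unrecoverable elements are marked with {/* … */}.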
+    <div>
+      {/* Sidebar - Mobile Overlay */}
+      {isSidebarOpen && (
+        <div onClick={() => setIsSidebarOpen(false)} />
+      )}
+
+      {/* Sidebar */}
+      {/* … */}
+
+      {/* Main Content */}
+      <div>
+        {/* Header */}
+        <header>
+          <div>
+            <button onClick={() => setIsSidebarOpen(true)}>
+              <Menu />
+            </button>
+            {activeView === 'chat' && currentSession && (
+              <div>
+                {getScenarioIcon(currentSession.scenario)}
+                <span>{t.scenarios[currentSession.scenario || ChatScenario.GENERAL].title}</span>
+              </div>
+            )}
+          </div>
+
+          {/* Mode Switcher (Only visible in chat) */}
+          {activeView === 'chat' && currentSession && (
+            <div>
+              {/* … */}
+            </div>
+          )}
+        </header>
+
+        {/* View Content */}
+        <div>
+          {/* Chat View */}
+          {activeView === 'chat' && (
+            <div>
+              {!currentSession ? (
+                <div>
+                  <div>
+                    <div>
+                      <Sparkles />
+                    </div>
+                    <h1>{t.welcome}</h1>
+                    <p>{t.tagline}</p>
+
+                    <div>
+                      {/* Daily Q&A */}
+                      <button onClick={() => startScenarioSession(ChatScenario.GENERAL)}>
+                        {getScenarioIcon(ChatScenario.GENERAL)}
+                        <span>{t.scenarios[ChatScenario.GENERAL].title}</span>
+                      </button>
+
+                      {/* Classic Reading */}
+                      <button onClick={() => startScenarioSession(ChatScenario.READING)}>
+                        {getScenarioIcon(ChatScenario.READING)}
+                        <span>{t.scenarios[ChatScenario.READING].title}</span>
+                      </button>
+
+                      {/* Concept */}
+                      <button onClick={() => startScenarioSession(ChatScenario.CONCEPT)}>
+                        {getScenarioIcon(ChatScenario.CONCEPT)}
+                        <span>{t.scenarios[ChatScenario.CONCEPT].title}</span>
+                      </button>
+
+                      {/* Research */}
+                      <button onClick={() => startScenarioSession(ChatScenario.RESEARCH)}>
+                        {getScenarioIcon(ChatScenario.RESEARCH)}
+                        <span>{t.scenarios[ChatScenario.RESEARCH].title}</span>
+                      </button>
+                    </div>
+                  </div>
+                </div>
+              ) : (
+                <>
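+                  {/* Message list */}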
+                  <div>
+                    {currentSession.messages.map((msg, idx) => (
+                      <div key={idx}>
+                        {msg.role === 'model' && (
+                          <div>
+                            {getScenarioIcon(currentSession.scenario)}
+                          </div>
+                        )}
+                        <div>
+                          {/* Attachments */}
+                          {msg.attachments?.map((att, i) => (
+                            <img key={i} src={`data:${att.mimeType};base64,${att.data}`} alt="attachment" />
+                          ))}
+
+                          {/* Bubble */}
+                          <div>
+                            <ReactMarkdown>
+                              {msg.content}
+                            </ReactMarkdown>
+                          </div>
+
+                          {/* Metadata / Actions */}
+                          <div>
+                            <span>{new Date(msg.timestamp).toLocaleTimeString()}</span>
+                            {msg.role === 'model' && (
+                              <>
+                                <button onClick={() => playTTS(msg.content)}>
+                                  <Volume2 />
+                                </button>
+                                {msg.groundingMetadata?.groundingChunks && msg.groundingMetadata.groundingChunks.length > 0 && (
+                                  <span>
+                                    {t.searchSources}
+                                  </span>
+                                )}
+                              </>
+                            )}
+                          </div>
+
+                          {/* Sources List */}
+                          {msg.role === 'model' && msg.groundingMetadata?.groundingChunks && (
+                            <div>
+                              <span>{t.searchSources}:</span>
+                              {/* … */}
+                            </div>
+                          )}
+                        </div>
+                      </div>
+                    ))}
+
+                    {/* Streaming Pending State */}
+                    {isProcessing && streamingContent && (
+                      <div>
+                        <div>
+                          {getScenarioIcon(currentSession.scenario)}
+                        </div>
+                        <div>
+                          <div>
+                            <ReactMarkdown>
+                              {streamingContent}
+                            </ReactMarkdown>
+                          </div>
+                          <div>
+                            <Loader2 />
+                            <span>{currentSession?.mode === ChatMode.DEEP ? t.thinking : t.generating}</span>
+                          </div>
+                        </div>
+                      </div>
+                    )}
+                    <div ref={messagesEndRef} />
+                  </div>
+
+                  {/* Input Area */}
+                  <div>
+                    {attachments.length > 0 && (
+                      <div>
+                        {attachments.map((a, i) => (
+                          <div key={i}>
+                            <img src={`data:${a.mimeType};base64,${a.data}`} alt="preview" />
+                            {/* … */}
+                          </div>
+                        ))}
+                      </div>
+                    )}
+                    {/* … */}