commit 28787833497350f49df8921916644a2842354a9f
Author: huty
Date:   Fri Nov 21 00:24:10 2025 +0800

    Initialize project

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/App.tsx b/App.tsx
new file mode 100644
index 0000000..fc5ca31
--- /dev/null
+++ b/App.tsx
@@ -0,0 +1,494 @@
+
+import React, { useState, useRef, useEffect } from 'react';
+import ChatView from './views/ChatView';
+import CreativeStudio from './views/CreativeStudio';
+import SpeakingPracticeView from './views/SpeakingPracticeView';
+import ReadingView from './views/ReadingView';
+import TranslationView from './views/TranslationView';
+import OCRView from './views/OCRView';
+import ListeningView from './views/ListeningView';
+import ToastContainer, { ToastMessage } from './components/Toast';
+import ConfirmModal from './components/ConfirmModal';
+import Onboarding from './components/Onboarding';
+import { MessageCircle, Palette, Mic2, Settings, Globe, Sparkles, BookOpen, Languages, Download, Upload, FileText, X, ScanText, Key, Save, Trash2, Menu, BrainCircuit, Link, Headphones } from 'lucide-react';
+import { AppMode, Language, ChatMessage, TranslationRecord, AppDataBackup, Role, MessageType, ReadingLessonRecord, AVAILABLE_CHAT_MODELS, ChatSession, OCRRecord, ListeningLessonRecord } from './types';
+import { translations } from './utils/localization';
+import { USER_API_KEY_STORAGE, USER_BASE_URL_STORAGE } from './services/geminiService';
+
+const STORAGE_KEYS = {
+  CHAT_SESSIONS: 'sakura_chat_sessions',
+  ACTIVE_SESSION: 'sakura_active_session_id',
+  TRANSLATION_HISTORY: 'sakura_translation_history',
+  READING_HISTORY: 'sakura_reading_history',
+  LISTENING_HISTORY: 'sakura_listening_history',
+  OCR_HISTORY: 'sakura_ocr_history',
+  LANGUAGE: 'sakura_language',
+  SELECTED_MODEL: 'sakura_selected_model',
+  HAS_SEEN_ONBOARDING: 'sakura_has_seen_onboarding'
+};
+
+const App: React.FC = () => {
+  const [currentView, setCurrentView] = useState(AppMode.CHAT);
+  // Default to 'zh' (Chinese)
+  const [language, setLanguage] = useState(() => (localStorage.getItem(STORAGE_KEYS.LANGUAGE) as Language) || 'zh');
+  const [chatSessions, setChatSessions] = useState(() => {
+    const stored = localStorage.getItem(STORAGE_KEYS.CHAT_SESSIONS);
+    if (stored) return JSON.parse(stored);
+    return [];
+  });
+  const [activeSessionId, setActiveSessionId] = useState(() => localStorage.getItem(STORAGE_KEYS.ACTIVE_SESSION) || '');
+  const [translationHistory, setTranslationHistory] = useState(() => {
+    const s = localStorage.getItem(STORAGE_KEYS.TRANSLATION_HISTORY); return s ? JSON.parse(s) : [];
+  });
+  const [readingHistory, setReadingHistory] = useState(() => {
+    const s = localStorage.getItem(STORAGE_KEYS.READING_HISTORY); return s ? JSON.parse(s) : [];
+  });
+  const [listeningHistory, setListeningHistory] = useState(() => {
+    const s = localStorage.getItem(STORAGE_KEYS.LISTENING_HISTORY); return s ? JSON.parse(s) : [];
+  });
+  const [ocrHistory, setOcrHistory] = useState(() => {
+    const s = localStorage.getItem(STORAGE_KEYS.OCR_HISTORY); return s ?
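+    // Lazy init: each history slice hydrates from localStorage once, on first render.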
JSON.parse(s) : []; + }); + const [selectedModel, setSelectedModel] = useState(() => localStorage.getItem(STORAGE_KEYS.SELECTED_MODEL) || AVAILABLE_CHAT_MODELS[0].id); + const [hasSeenOnboarding, setHasSeenOnboarding] = useState(() => !!localStorage.getItem(STORAGE_KEYS.HAS_SEEN_ONBOARDING)); + const [isSettingsOpen, setIsSettingsOpen] = useState(false); + const [isSidebarOpen, setIsSidebarOpen] = useState(false); + + const [userApiKey, setUserApiKey] = useState(''); + const [userBaseUrl, setUserBaseUrl] = useState(''); + + const [toasts, setToasts] = useState([]); + + const [confirmState, setConfirmState] = useState<{isOpen: boolean, title: string, message: string, onConfirm: () => void}>({ + isOpen: false, title: '', message: '', onConfirm: () => {} + }); + + const t = translations[language]; + + useEffect(() => { localStorage.setItem(STORAGE_KEYS.CHAT_SESSIONS, JSON.stringify(chatSessions)); }, [chatSessions]); + useEffect(() => { localStorage.setItem(STORAGE_KEYS.ACTIVE_SESSION, activeSessionId); }, [activeSessionId]); + useEffect(() => { localStorage.setItem(STORAGE_KEYS.TRANSLATION_HISTORY, JSON.stringify(translationHistory)); }, [translationHistory]); + useEffect(() => { localStorage.setItem(STORAGE_KEYS.READING_HISTORY, JSON.stringify(readingHistory)); }, [readingHistory]); + useEffect(() => { localStorage.setItem(STORAGE_KEYS.LISTENING_HISTORY, JSON.stringify(listeningHistory)); }, [listeningHistory]); + useEffect(() => { localStorage.setItem(STORAGE_KEYS.OCR_HISTORY, JSON.stringify(ocrHistory)); }, [ocrHistory]); + useEffect(() => { localStorage.setItem(STORAGE_KEYS.LANGUAGE, language); }, [language]); + useEffect(() => { localStorage.setItem(STORAGE_KEYS.SELECTED_MODEL, selectedModel); }, [selectedModel]); + + useEffect(() => { + const activeSession = chatSessions.find(s => s.id === activeSessionId); + if (activeSession && activeSession.messages.length === 1 && activeSession.messages[0].role === Role.MODEL) { + const newWelcome = translations[language].chat.welcome; + const newTitle = translations[language].chat.newChat; + setChatSessions(prev => prev.map(s => s.id === activeSessionId ? 
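+        // Only the auto-generated welcome message exists here, so the greeting and
+        // session title can be rewritten in the newly selected UI language.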
{ ...s, title: newTitle, messages: [{ ...s.messages[0], content: newWelcome }] } : s)); + } + }, [language]); + + const hasInitialized = useRef(false); + useEffect(() => { + if (!hasInitialized.current) { + if (chatSessions.length === 0) createNewSession(); + else if (!activeSessionId) setActiveSessionId(chatSessions[0].id); + + const storedKey = localStorage.getItem(USER_API_KEY_STORAGE); + const storedUrl = localStorage.getItem(USER_BASE_URL_STORAGE); + const envKey = process.env.API_KEY; + + if (storedKey) setUserApiKey(storedKey); + if (storedUrl) setUserBaseUrl(storedUrl); + + if (!storedKey && (!envKey || envKey.length === 0)) { + setIsSettingsOpen(true); + setTimeout(() => addToast('info', translations[language].settings.apiKeyMissing), 500); + } + hasInitialized.current = true; + } + }, []); + + const createNewSession = () => { + const newId = Date.now().toString(); + const welcomeMsg: ChatMessage = { id: 'welcome', role: Role.MODEL, type: MessageType.TEXT, content: translations[language].chat.welcome, timestamp: Date.now() }; + setChatSessions(prev => [{ id: newId, title: translations[language].chat.newChat, messages: [welcomeMsg], createdAt: Date.now(), updatedAt: Date.now() }, ...prev]); + setActiveSessionId(newId); + }; + + const updateSessionMessages = (sessionId: string, messages: ChatMessage[]) => { + setChatSessions(prev => prev.map(s => { + if (s.id === sessionId) { + let title = s.title; + if (messages.length > 1) { + const firstUserMsg = messages.find(m => m.role === Role.USER); + if (firstUserMsg) title = firstUserMsg.content.slice(0, 30) + (firstUserMsg.content.length > 30 ? '...' : ''); + } + return { ...s, messages, title, updatedAt: Date.now() }; + } + return s; + })); + }; + + const updateReadingLesson = (record: ReadingLessonRecord) => { + setReadingHistory(prev => prev.map(item => item.id === record.id ? record : item)); + }; + + const updateListeningLesson = (record: ListeningLessonRecord) => { + setListeningHistory(prev => prev.map(item => item.id === record.id ? 
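+      // Swap in the edited lesson; every other record passes through unchanged.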
record : item)); + }; + + const deleteSession = (sessionId: string) => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].chat.deleteConfirm, + onConfirm: () => { + const remaining = chatSessions.filter(s => s.id !== sessionId); + setChatSessions(remaining); + if (activeSessionId === sessionId) { + if (remaining.length > 0) setActiveSessionId(remaining[0].id); + else createNewSession(); + } + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const clearAllChatSessions = () => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.clearHistoryConfirm, + onConfirm: () => { + setChatSessions([]); + createNewSession(); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const deleteReadingLesson = (id: string) => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.deleteItemConfirm, + onConfirm: () => { + setReadingHistory(prev => prev.filter(item => item.id !== id)); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const clearReadingHistory = () => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.clearHistoryConfirm, + onConfirm: () => { + setReadingHistory([]); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const deleteListeningLesson = (id: string) => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.deleteItemConfirm, + onConfirm: () => { + setListeningHistory(prev => prev.filter(item => item.id !== id)); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const clearListeningHistory = () => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.clearHistoryConfirm, + onConfirm: () => { + setListeningHistory([]); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const deleteOCRRecord = (id: string) => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.deleteItemConfirm, + onConfirm: () => { + setOcrHistory(prev => prev.filter(item => item.id !== id)); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const clearOCRHistory = () => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.clearHistoryConfirm, + onConfirm: () => { + setOcrHistory([]); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const deleteTranslationRecord = (id: string) => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.deleteItemConfirm, + onConfirm: () => { + setTranslationHistory(prev => prev.filter(item => item.id !== id)); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const clearTranslationHistory = () => { + setConfirmState({ + isOpen: true, + title: translations[language].common.confirm, + message: translations[language].common.clearHistoryConfirm, + onConfirm: () => { + setTranslationHistory([]); + setConfirmState(prev => ({ ...prev, isOpen: false })); + } + }); + }; + + const addToast = (type: 'success' | 'error' | 'info', 
message: string) => { + const id = Date.now().toString(); + setToasts(prev => [...prev, { id, type, message }]); + }; + const removeToast = (id: string) => setToasts(prev => prev.filter(t => t.id !== id)); + + const handleSaveSettings = () => { + if (userApiKey.trim()) { + localStorage.setItem(USER_API_KEY_STORAGE, userApiKey.trim()); + } + if (userBaseUrl.trim()) { + localStorage.setItem(USER_BASE_URL_STORAGE, userBaseUrl.trim()); + } else { + localStorage.removeItem(USER_BASE_URL_STORAGE); + } + addToast('success', t.settings.keySaved); + }; + + const handleClearSettings = () => { + localStorage.removeItem(USER_API_KEY_STORAGE); + localStorage.removeItem(USER_BASE_URL_STORAGE); + setUserApiKey(''); + setUserBaseUrl(''); + addToast('info', t.settings.keyRemoved); + }; + + const toggleLanguage = () => { + if (language === 'en') setLanguage('ja'); else if (language === 'ja') setLanguage('zh'); else setLanguage('en'); + }; + + const handleViewChange = (mode: AppMode) => { setCurrentView(mode); setIsSidebarOpen(false); }; + const completeOnboarding = () => { localStorage.setItem(STORAGE_KEYS.HAS_SEEN_ONBOARDING, 'true'); setHasSeenOnboarding(true); }; + + const handleBackup = () => { + const backup: AppDataBackup = { version: 1, createdAt: Date.now(), language, chatSessions, translationHistory, readingHistory, listeningHistory, ocrHistory }; + const blob = new Blob([JSON.stringify(backup, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); a.href = url; a.download = `sakura-backup-${new Date().toISOString().slice(0,10)}.json`; a.click(); URL.revokeObjectURL(url); + }; + + const handleRestore = (e: React.ChangeEvent) => { + const file = e.target.files?.[0]; if (!file) return; + const reader = new FileReader(); + reader.onload = (event) => { + try { + const data = JSON.parse(event.target?.result as string) as any; + if (data.chatSessions && Array.isArray(data.chatSessions)) { + setChatSessions(data.chatSessions); + setActiveSessionId(data.chatSessions[0]?.id || ''); + setTranslationHistory(data.translationHistory || []); + setReadingHistory(data.readingHistory || []); + setListeningHistory(data.listeningHistory || []); + setOcrHistory(data.ocrHistory || []); + setLanguage(data.language || 'en'); + addToast('success', t.settings.successRestore); + setIsSettingsOpen(false); + } else { throw new Error(); } + } catch (err) { addToast('error', t.settings.errorRestore); } + }; + reader.readAsText(file); + }; + + const exportChatText = () => { + const session = chatSessions.find(s => s.id === activeSessionId); if (!session) return; + const text = session.messages.map(m => `[${new Date(m.timestamp).toLocaleString()}] ${m.role}: ${m.content}`).join('\n\n'); + const blob = new Blob([text], { type: 'text/plain' }); const url = URL.createObjectURL(blob); const a = document.createElement('a'); a.href = url; a.download = `chat.txt`; a.click(); + }; + + const exportTranslationCSV = () => { + const header = "Timestamp,Source,Target,SrcText,TgtText\n"; + const rows = translationHistory.map(t => `"${new Date(t.timestamp).toISOString()}","${t.sourceLang}","${t.targetLang}","${t.sourceText.replace(/"/g, '""')}","${t.targetText.replace(/"/g, '""')}"`).join('\n'); + const blob = new Blob([header + rows], { type: 'text/csv' }); const url = URL.createObjectURL(blob); const a = document.createElement('a'); a.href = url; a.download = `translations.csv`; a.click(); + }; + + const exportReadingHistory = () => { + const blob = new 
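+    // Pretty-printed JSON keeps the exported reading history human-readable.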
Blob([JSON.stringify(readingHistory, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); const a = document.createElement('a'); a.href = url; a.download = `reading-history.json`; a.click(); URL.revokeObjectURL(url); + }; + + const exportOCRHistory = () => { + const blob = new Blob([JSON.stringify(ocrHistory, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); const a = document.createElement('a'); a.href = url; a.download = `ocr-history.json`; a.click(); URL.revokeObjectURL(url); + }; + + const NavButton = ({ mode, icon: Icon, label, colorClass }: any) => { + const isActive = currentView === mode; + return ( + + ); + }; + + return ( +
+ + setConfirmState(prev => ({...prev, isOpen: false}))} /> + + {!hasSeenOnboarding && } + {isSidebarOpen &&
setIsSidebarOpen(false)} />} + + + +
+
+ + Sakura Sensei +
+
+
+ {currentView === AppMode.CHAT && } + {currentView === AppMode.TRANSLATION && setTranslationHistory(prev => [...prev, rec])} clearHistory={clearTranslationHistory} onDeleteHistoryItem={deleteTranslationRecord} />} + {currentView === AppMode.SPEAKING && } + {currentView === AppMode.CREATIVE && } + {currentView === AppMode.READING && setReadingHistory(prev => [...prev, rec])} onClearHistory={clearReadingHistory} onDeleteHistoryItem={deleteReadingLesson} onUpdateHistory={updateReadingLesson} />} + {currentView === AppMode.LISTENING && setListeningHistory(prev => [...prev, rec])} onClearHistory={clearListeningHistory} onDeleteHistoryItem={deleteListeningLesson} onUpdateHistory={updateListeningLesson} />} + {currentView === AppMode.OCR && setOcrHistory(prev => [...prev, rec])} onClearHistory={clearOCRHistory} onDeleteHistoryItem={deleteOCRRecord} addToast={addToast} />} +
+
+ + {isSettingsOpen && ( +
+
+
+

{t.settings.title}

+ +
+
+
+

{t.settings.apiKeyTitle}

+

{t.settings.apiKeyDesc}

+
+ setUserApiKey(e.target.value)} placeholder={t.settings.apiKeyPlaceholder} className="w-full p-3 rounded-lg border border-amber-200 bg-white text-sm outline-none focus:ring-2 focus:ring-amber-400 transition-all" /> +
+ + setUserBaseUrl(e.target.value)} placeholder={t.settings.baseUrlPlaceholder} className="w-full p-3 pl-9 rounded-lg border border-amber-200 bg-white text-sm outline-none focus:ring-2 focus:ring-amber-400 transition-all" /> +
+
+ + +
+
+
+
+

{t.settings.modelTitle}

+ +
+
+

{t.settings.backupTitle}

+
+
+
{t.settings.backupBtn}
+
{t.settings.backupDesc}
+
+
+
{t.settings.restoreBtn}
+
{t.settings.restoreDesc}
+ +
+
+
+
+

{t.settings.exportTitle}

+
+ + + + +
+
+
+
+
+ )} +
+    );
+};
+
+export default App;
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..3cd2c89
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,34 @@
+# Stage 1: Build
+FROM node:20-alpine as builder
+
+WORKDIR /app
+
+# Copy package files
+COPY package*.json ./
+
+# Install dependencies
+RUN npm install
+
+# Copy source code
+COPY . .
+
+# Accept API Key as build arg (fallback if user doesn't set one)
+ARG VITE_API_KEY
+ENV VITE_API_KEY=$VITE_API_KEY
+
+# Build the app
+RUN npm run build
+
+# Stage 2: Serve
+FROM nginx:alpine
+
+# Copy custom nginx config
+COPY nginx.conf /etc/nginx/conf.d/default.conf
+
+# Copy static files
+COPY --from=builder /app/dist /usr/share/nginx/html
+
+# Cloud Run port
+EXPOSE 8080
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9487d72
--- /dev/null
+++ b/README.md
@@ -0,0 +1,20 @@
+
+GHBanner +
+ +# Run and deploy your AI Studio app + +This contains everything you need to run your app locally. + +View your app in AI Studio: https://ai.studio/apps/drive/1MdpOjnvh39r0kvYmztzlvr-cTY1iF2tW + +## Run Locally + +**Prerequisites:** Node.js + + +1. Install dependencies: + `npm install` +2. Set the `GEMINI_API_KEY` in [.env.local](.env.local) to your Gemini API key +3. Run the app: + `npm run dev` diff --git a/components/AudioRecorder.tsx b/components/AudioRecorder.tsx new file mode 100644 index 0000000..11fec18 --- /dev/null +++ b/components/AudioRecorder.tsx @@ -0,0 +1,235 @@ +import React, { useState, useRef, useEffect } from 'react'; +import { Mic, Square, Loader2 } from 'lucide-react'; + +interface AudioRecorderProps { + onAudioCaptured: (base64Audio: string) => void; + disabled?: boolean; + titleStart?: string; + titleStop?: string; +} + +const AudioRecorder: React.FC = ({ + onAudioCaptured, + disabled, + titleStart = "Start Voice Input", + titleStop = "Stop Recording" +}) => { + const [isRecording, setIsRecording] = useState(false); + const [isProcessing, setIsProcessing] = useState(false); + const audioContextRef = useRef(null); + const streamRef = useRef(null); + const processorRef = useRef(null); + const inputRef = useRef(null); + const audioDataRef = useRef([]); + + useEffect(() => { + return () => { + cleanup(); + }; + }, []); + + const cleanup = () => { + if (streamRef.current) { + streamRef.current.getTracks().forEach(track => track.stop()); + streamRef.current = null; + } + if (processorRef.current) { + processorRef.current.disconnect(); + processorRef.current = null; + } + if (inputRef.current) { + inputRef.current.disconnect(); + inputRef.current = null; + } + if (audioContextRef.current) { + if (audioContextRef.current.state !== 'closed') { + audioContextRef.current.close(); + } + audioContextRef.current = null; + } + }; + + const startRecording = async () => { + try { + audioDataRef.current = []; + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + streamRef.current = stream; + + const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)(); + if (audioContext.state === 'suspended') { + await audioContext.resume(); + } + audioContextRef.current = audioContext; + + const input = audioContext.createMediaStreamSource(stream); + inputRef.current = input; + + // Buffer size 4096, 1 input channel, 1 output channel + const processor = audioContext.createScriptProcessor(4096, 1, 1); + processorRef.current = processor; + + processor.onaudioprocess = (e) => { + const channelData = e.inputBuffer.getChannelData(0); + // Clone the data + audioDataRef.current.push(new Float32Array(channelData)); + }; + + input.connect(processor); + processor.connect(audioContext.destination); + + setIsRecording(true); + + } catch (err) { + console.error("Error accessing microphone:", err); + alert("Could not access microphone. 
Please check permissions."); + } + }; + + const stopRecording = async () => { + if (!isRecording) return; + setIsRecording(false); + setIsProcessing(true); + + // Stop capturing + if (streamRef.current) { + streamRef.current.getTracks().forEach(track => track.stop()); + } + if (processorRef.current) { + processorRef.current.disconnect(); + } + if (inputRef.current) { + inputRef.current.disconnect(); + } + + // Small delay to allow last process tick + setTimeout(() => { + try { + if (audioDataRef.current.length === 0) { + setIsProcessing(false); + cleanup(); + return; + } + + const sampleRate = audioContextRef.current?.sampleRate || 44100; + const blob = exportWAV(audioDataRef.current, sampleRate); + cleanup(); + + const reader = new FileReader(); + reader.readAsDataURL(blob); + reader.onloadend = () => { + const result = reader.result as string; + // result is "data:audio/wav;base64,..." + const base64String = result.split(',')[1]; + onAudioCaptured(base64String); + setIsProcessing(false); + }; + } catch (e) { + console.error("WAV Encoding Error", e); + setIsProcessing(false); + cleanup(); + } + }, 100); + }; + + return ( + + ); +}; + +// --- WAV ENCODER HELPERS --- + +const exportWAV = (audioData: Float32Array[], sampleRate: number) => { + const mergedBuffers = mergeBuffers(audioData); + const downsampledBuffer = downsampleBuffer(mergedBuffers, sampleRate); + const buffer = encodeWAV(downsampledBuffer); + return new Blob([buffer], { type: 'audio/wav' }); +}; + +const mergeBuffers = (audioData: Float32Array[]) => { + const totalLength = audioData.reduce((acc, val) => acc + val.length, 0); + const result = new Float32Array(totalLength); + let offset = 0; + for (const arr of audioData) { + result.set(arr, offset); + offset += arr.length; + } + return result; +}; + +const downsampleBuffer = (buffer: Float32Array, sampleRate: number) => { + if (sampleRate === 16000) return buffer; + const targetRate = 16000; + const sampleRateRatio = sampleRate / targetRate; + const newLength = Math.ceil(buffer.length / sampleRateRatio); + const result = new Float32Array(newLength); + let offsetResult = 0; + let offsetBuffer = 0; + + while (offsetResult < result.length) { + const nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio); + let accum = 0, count = 0; + for (let i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) { + accum += buffer[i]; + count++; + } + + // Fixed NaN issue here: verify count is > 0 + if (count > 0) { + result[offsetResult] = accum / count; + } else { + result[offsetResult] = 0; + } + + offsetResult++; + offsetBuffer = nextOffsetBuffer; + } + return result; +}; + +const encodeWAV = (samples: Float32Array) => { + const buffer = new ArrayBuffer(44 + samples.length * 2); + const view = new DataView(buffer); + + const writeString = (view: DataView, offset: number, string: string) => { + for (let i = 0; i < string.length; i++) { + view.setUint8(offset + i, string.charCodeAt(i)); + } + }; + + writeString(view, 0, 'RIFF'); + view.setUint32(4, 36 + samples.length * 2, true); + writeString(view, 8, 'WAVE'); + writeString(view, 12, 'fmt '); + view.setUint32(16, 16, true); + view.setUint16(20, 1, true); + view.setUint16(22, 1, true); + view.setUint32(24, 16000, true); + view.setUint32(28, 16000 * 2, true); + view.setUint16(32, 2, true); + view.setUint16(34, 16, true); + writeString(view, 36, 'data'); + view.setUint32(40, samples.length * 2, true); + + const floatTo16BitPCM = (output: DataView, offset: number, input: Float32Array) => { + for (let i = 0; i < 
input.length; i++, offset += 2) { + const s = Math.max(-1, Math.min(1, input[i])); + output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true); + } + }; + + floatTo16BitPCM(view, 44, samples); + return view; +}; + +export default AudioRecorder; \ No newline at end of file diff --git a/components/ChatBubble.tsx b/components/ChatBubble.tsx new file mode 100644 index 0000000..4f7ae98 --- /dev/null +++ b/components/ChatBubble.tsx @@ -0,0 +1,267 @@ + + +import React, { useState, useRef } from 'react'; +import { Role, MessageType, ChatMessage, Language } from '../types'; +import { User, Bot, BrainCircuit, Volume2, Pause, Sparkles, Download, Copy, Check, Loader2 } from 'lucide-react'; +import { geminiService, decodeAudioData } from '../services/geminiService'; +import { processAndDownloadAudio } from '../utils/audioUtils'; +import { translations } from '../utils/localization'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; + +interface ChatBubbleProps { + message: ChatMessage; + language: Language; + onUpdateMessage?: (updatedMessage: ChatMessage) => void; + onError?: (msg: string) => void; +} + +const ChatBubble: React.FC = ({ message, language, onUpdateMessage, onError }) => { + const isUser = message.role === Role.USER; + const [isPlaying, setIsPlaying] = useState(false); + const [isGeneratingAudio, setIsGeneratingAudio] = useState(false); + const [isCopied, setIsCopied] = useState(false); + + const audioContextRef = useRef(null); + const audioSourceRef = useRef(null); + + const t = translations[language].chat; + const tCommon = translations[language].common; + + const stopAudio = () => { + if (audioSourceRef.current) { + audioSourceRef.current.stop(); + audioSourceRef.current = null; + } + setIsPlaying(false); + }; + + const handlePlayAudio = async () => { + if (isPlaying) { + stopAudio(); + return; + } + + let base64Data = message.metadata?.audioUrl; + + // If no audio cached, generate it on demand + if (!base64Data && message.content && message.type === MessageType.TEXT) { + try { + setIsGeneratingAudio(true); + base64Data = await geminiService.generateSpeech(message.content); + + if (!base64Data) throw new Error("Audio generation returned empty"); + + // Cache it if parent provided update handler + if (onUpdateMessage) { + onUpdateMessage({ + ...message, + metadata: { + ...message.metadata, + audioUrl: base64Data + } + }); + } + } catch (e) { + console.error("Audio gen failed", e); + setIsGeneratingAudio(false); + if (onError) onError(translations[language].common.error); + return; + } finally { + setIsGeneratingAudio(false); + } + } + + if (!base64Data) return; + + try { + if (!audioContextRef.current) { + audioContextRef.current = new (window.AudioContext || (window as any).webkitAudioContext)(); + } + const ctx = audioContextRef.current; + if (ctx.state === 'suspended') await ctx.resume(); + + const buffer = await decodeAudioData(base64Data, ctx); + + const source = ctx.createBufferSource(); + source.buffer = buffer; + source.connect(ctx.destination); + source.onended = () => setIsPlaying(false); + source.start(); + audioSourceRef.current = source; + setIsPlaying(true); + } catch (e) { + console.error("Audio playback error", e); + setIsPlaying(false); + if (onError) onError(translations[language].common.error); + } + }; + + const handleDownloadAudio = async () => { + let base64Data = message.metadata?.audioUrl; + if (!base64Data && message.content && message.type === MessageType.TEXT) { + try { + setIsGeneratingAudio(true); + base64Data = await 
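+        // Download reuses the same TTS call as playback, so the clip is generated
+        // once and then cached on the message via onUpdateMessage.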
geminiService.generateSpeech(message.content); + if (!base64Data) throw new Error("Audio generation returned empty"); + + if (onUpdateMessage) { + onUpdateMessage({ ...message, metadata: { ...message.metadata, audioUrl: base64Data } }); + } + } catch (e) { + console.error(e); + if (onError) onError(translations[language].common.error); + } finally { + setIsGeneratingAudio(false); + } + } + + if (base64Data) { + const filename = `sakura_audio_${Date.now()}.wav`; + processAndDownloadAudio(base64Data, filename); + } + }; + + const handleCopy = () => { + if (message.content) { + navigator.clipboard.writeText(message.content); + setIsCopied(true); + setTimeout(() => setIsCopied(false), 2000); + } + }; + + const formatTime = (timestamp: number) => { + const date = new Date(timestamp); + const now = new Date(); + const isToday = date.getDate() === now.getDate() && date.getMonth() === now.getMonth() && date.getFullYear() === now.getFullYear(); + + const timeStr = date.toLocaleTimeString([], {hour: '2-digit', minute:'2-digit'}); + if (isToday) return timeStr; + + return `${date.toLocaleDateString()} ${timeStr}`; + }; + + return ( +
+
+ + {/* Avatar */} +
+ {isUser ? : } +
+ + {/* Content Bubble */} +
+ + {/* Metadata Badges */} + {message.metadata?.isThinking && ( + + {t.deepThinking} + + )} + +
+ + {/* TEXT CONTENT - MARKDOWN RENDERED */} + {message.content && ( +
+ {message.type === MessageType.TEXT ? ( + + {message.content} + + ) : ( + message.content + )} +
+ )} + + {/* IMAGE CONTENT */} + {message.type === MessageType.IMAGE && message.metadata?.imageUrl && ( +
+ Uploaded or Generated +
+ )} + {message.type === MessageType.TEXT && message.metadata?.imageUrl && ( +
+ Context +
{t.imageAnalyzed}
+
+ )} + + {/* Action Bar (Copy, TTS, Download) - Always visible for Text messages or if audioUrl exists */} + {(message.type === MessageType.TEXT || message.metadata?.audioUrl) && ( +
+ + {/* Play TTS */} + + + {/* Download Audio */} + + + {/* Copy Text */} + +
+ )} +
+ + {/* Footer (Timestamp + Model) */} +
+ + {formatTime(message.timestamp)} + + {!isUser && message.model && ( +
+ {tCommon.generatedBy} {message.model.replace('gemini-', '')} +
+ )} +
+ +
+
+
+ ); +}; + +export default ChatBubble; \ No newline at end of file diff --git a/components/ConfirmModal.tsx b/components/ConfirmModal.tsx new file mode 100644 index 0000000..303faf0 --- /dev/null +++ b/components/ConfirmModal.tsx @@ -0,0 +1,52 @@ + +import React from 'react'; +import { AlertTriangle, X } from 'lucide-react'; +import { Language } from '../types'; +import { translations } from '../utils/localization'; + +interface ConfirmModalProps { + isOpen: boolean; + title: string; + message: string; + language: Language; + onConfirm: () => void; + onCancel: () => void; +} + +const ConfirmModal: React.FC = ({ isOpen, title, message, language, onConfirm, onCancel }) => { + if (!isOpen) return null; + const t = translations[language].common; + + return ( +
+
+ +
+
+ +
+

{title}

+

{message}

+
+ + +
+
+
+
+ ); +}; + +export default ConfirmModal; diff --git a/components/Onboarding.tsx b/components/Onboarding.tsx new file mode 100644 index 0000000..5a48011 --- /dev/null +++ b/components/Onboarding.tsx @@ -0,0 +1,127 @@ + +import React, { useState } from 'react'; +import { MessageCircle, Sparkles, Mic2, BookOpen, X, ArrowRight, Check, Globe } from 'lucide-react'; +import { Language } from '../types'; +import { translations } from '../utils/localization'; + +interface OnboardingProps { + language: Language; + setLanguage: (lang: Language) => void; + onComplete: () => void; +} + +const Onboarding: React.FC = ({ language, setLanguage, onComplete }) => { + const t = translations[language].onboarding; + const [step, setStep] = useState(0); + + const steps = [ + { + title: t.step1Title, + desc: t.step1Desc, + icon: , + color: 'bg-indigo-50', + }, + { + title: t.step2Title, + desc: t.step2Desc, + icon: , + color: 'bg-orange-50', + }, + { + title: t.step3Title, + desc: t.step3Desc, + icon: , + color: 'bg-blue-50', + } + ]; + + const handleNext = () => { + if (step < steps.length - 1) { + setStep(step + 1); + } else { + onComplete(); + } + }; + + return ( +
+
+ {/* Skip/Close */} + + + {/* Header Image/Graphic */} +
+
+
+ + +

Sakura Sensei

+ + {/* Language Switcher in Header (Step 0) */} + {step === 0 && ( +
+ {(['en', 'ja', 'zh'] as Language[]).map(lang => ( + + ))} +
+ )} +
+ +
+
+

{t.welcome}

+

{t.desc1}

+
+ + {/* Step Card */} +
+ {steps.map((s, idx) => ( +
+
+ {s.icon} +
+

{s.title}

+

{s.desc}

+
+ ))} +
+ + {/* Controls */} +
+ {/* Indicators */} +
+ {steps.map((_, idx) => ( +
+ ))} +
+ + +
+
+
+
+ ); +}; + +export default Onboarding; diff --git a/components/Toast.tsx b/components/Toast.tsx new file mode 100644 index 0000000..1d2015b --- /dev/null +++ b/components/Toast.tsx @@ -0,0 +1,64 @@ + +import React, { useEffect } from 'react'; +import { CheckCircle, AlertCircle, X } from 'lucide-react'; + +export interface ToastMessage { + id: string; + type: 'success' | 'error' | 'info'; + message: string; +} + +interface ToastProps { + toasts: ToastMessage[]; + onRemove: (id: string) => void; +} + +const ToastContainer: React.FC = ({ toasts, onRemove }) => { + return ( +
+ {toasts.map((toast) => ( + + ))} +
+ ); +}; + +const ToastItem: React.FC<{ toast: ToastMessage; onRemove: (id: string) => void }> = ({ toast, onRemove }) => { + useEffect(() => { + const timer = setTimeout(() => { + onRemove(toast.id); + }, 3000); + return () => clearTimeout(timer); + }, [toast.id, onRemove]); + + const getStyles = () => { + switch (toast.type) { + case 'success': + return 'bg-emerald-50 border-emerald-100 text-emerald-700'; + case 'error': + return 'bg-red-50 border-red-100 text-red-700'; + default: + return 'bg-indigo-50 border-indigo-100 text-indigo-700'; + } + }; + + const getIcon = () => { + switch (toast.type) { + case 'success': return ; + case 'error': return ; + default: return ; + } + }; + + return ( +
+ {getIcon()} +

{toast.message}

+ +
+ ); +}; + +export default ToastContainer; \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 0000000..0d50d57 --- /dev/null +++ b/index.html @@ -0,0 +1,84 @@ + + + + + + + + + Sakura Sensei 🌸 + + + + + + +
+ + + + \ No newline at end of file diff --git a/index.tsx b/index.tsx new file mode 100644 index 0000000..6ca5361 --- /dev/null +++ b/index.tsx @@ -0,0 +1,15 @@ +import React from 'react'; +import ReactDOM from 'react-dom/client'; +import App from './App'; + +const rootElement = document.getElementById('root'); +if (!rootElement) { + throw new Error("Could not find root element to mount to"); +} + +const root = ReactDOM.createRoot(rootElement); +root.render( + + + +); \ No newline at end of file diff --git a/manifest.json b/manifest.json new file mode 100644 index 0000000..1070513 --- /dev/null +++ b/manifest.json @@ -0,0 +1,22 @@ +{ + "name": "Sakura Sensei", + "short_name": "Sakura", + "start_url": "/", + "display": "standalone", + "background_color": "#ffffff", + "theme_color": "#ffffff", + "icons": [ + { + "src": "https://api.iconify.design/twemoji:cherry-blossom.svg", + "sizes": "192x192", + "type": "image/svg+xml", + "purpose": "any maskable" + }, + { + "src": "https://api.iconify.design/twemoji:cherry-blossom.svg", + "sizes": "512x512", + "type": "image/svg+xml", + "purpose": "any maskable" + } + ] +} \ No newline at end of file diff --git a/metadata.json b/metadata.json new file mode 100644 index 0000000..f6f43d1 --- /dev/null +++ b/metadata.json @@ -0,0 +1,8 @@ +{ + "name": "Sakura Sensei 🌸 - AI Japanese Tutor", + "description": "Immerse yourself in Japanese with Sakura Sensei. Experience realistic roleplay, deep cultural insights, and creative tools powered by Gemini.", + "requestFramePermissions": [ + "microphone", + "camera" + ] +} \ No newline at end of file diff --git a/nginx.conf b/nginx.conf new file mode 100644 index 0000000..555292f --- /dev/null +++ b/nginx.conf @@ -0,0 +1,19 @@ +server { + listen 8080; + server_name localhost; + + root /usr/share/nginx/html; + index index.html; + + gzip on; + gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; + + location / { + try_files $uri $uri/ /index.html; + } + + location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ { + expires 1y; + add_header Cache-Control "public, no-transform"; + } +} diff --git a/package.json b/package.json new file mode 100644 index 0000000..6374323 --- /dev/null +++ b/package.json @@ -0,0 +1,31 @@ +{ + "name": "sakura-sensei", + "private": true, + "version": "1.1.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "@google/genai": "*", + "html2canvas": "^1.4.1", + "lucide-react": "^0.344.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-markdown": "^9.0.1", + "remark-gfm": "^4.0.0" + }, + "devDependencies": { + "@types/react": "^18.2.64", + "@types/react-dom": "^18.2.21", + "@vitejs/plugin-react": "^4.2.1", + "autoprefixer": "^10.4.18", + "postcss": "^8.4.35", + "tailwindcss": "^3.4.1", + "@tailwindcss/typography": "^0.5.10", + "typescript": "^5.4.2", + "vite": "^5.1.6" + } +} \ No newline at end of file diff --git a/releases/HTY1024-APP-SKR-0.1.0_20251120.zip b/releases/HTY1024-APP-SKR-0.1.0_20251120.zip new file mode 100644 index 0000000..6071228 Binary files /dev/null and b/releases/HTY1024-APP-SKR-0.1.0_20251120.zip differ diff --git a/releases/HTY1024-APP-SKR-0.2.0_20251120.zip b/releases/HTY1024-APP-SKR-0.2.0_20251120.zip new file mode 100644 index 0000000..7ee3246 Binary files /dev/null and b/releases/HTY1024-APP-SKR-0.2.0_20251120.zip differ diff --git a/releases/HTY1024-APP-SKR-0.3.0_20251120.zip 
b/releases/HTY1024-APP-SKR-0.3.0_20251120.zip
new file mode 100644
index 0000000..e8d265b
Binary files /dev/null and b/releases/HTY1024-APP-SKR-0.3.0_20251120.zip differ
diff --git a/releases/HTY1024-APP-SKR-main_20251121_0023.zip b/releases/HTY1024-APP-SKR-main_20251121_0023.zip
new file mode 100644
index 0000000..9ac6dce
Binary files /dev/null and b/releases/HTY1024-APP-SKR-main_20251121_0023.zip differ
diff --git a/service-worker.js b/service-worker.js
new file mode 100644
index 0000000..535018c
--- /dev/null
+++ b/service-worker.js
@@ -0,0 +1,42 @@
+const CACHE_NAME = 'sakura-sensei-v1';
+const urlsToCache = [
+  '/',
+  '/index.html',
+  '/manifest.json'
+];
+
+self.addEventListener('install', (event) => {
+  event.waitUntil(
+    caches.open(CACHE_NAME)
+      .then((cache) => {
+        return cache.addAll(urlsToCache);
+      })
+  );
+});
+
+self.addEventListener('fetch', (event) => {
+  event.respondWith(
+    caches.match(event.request)
+      .then((response) => {
+        if (response) {
+          return response;
+        }
+        return fetch(event.request);
+      })
+  );
+});
+
+self.addEventListener('activate', (event) => {
+  const cacheWhitelist = [CACHE_NAME];
+  event.waitUntil(
+    caches.keys().then((cacheNames) => {
+      return Promise.all(
+        cacheNames.map((cacheName) => {
+          if (cacheWhitelist.indexOf(cacheName) === -1) {
+            return caches.delete(cacheName);
+          }
+        })
+      );
+    })
+  );
+});
\ No newline at end of file
diff --git a/services/geminiService.ts b/services/geminiService.ts
new file mode 100644
index 0000000..47fc995
--- /dev/null
+++ b/services/geminiService.ts
@@ -0,0 +1,543 @@
+
+
+import { GoogleGenAI, Modality, Type } from "@google/genai";
+import { PronunciationFeedback, Language, ReadingLesson, ReadingDifficulty, OCRAnalysis, ListeningLesson } from "../types";
+import { base64ToUint8Array, uint8ArrayToBase64 } from "../utils/audioUtils";
+
+export const USER_API_KEY_STORAGE = 'sakura_user_api_key';
+export const USER_BASE_URL_STORAGE = 'sakura_user_base_url';
+
+// Helper to decode audio for playback
+// Updated to support raw PCM (typically returned by Gemini TTS) which the browser cannot decode automatically
+export const decodeAudioData = async (
+  base64Data: string,
+  audioContext: AudioContext
+): Promise<AudioBuffer> => {
+  const binaryString = atob(base64Data);
+  const len = binaryString.length;
+  const bytes = new Uint8Array(len);
+  for (let i = 0; i < len; i++) {
+    bytes[i] = binaryString.charCodeAt(i);
+  }
+
+  try {
+    // Try standard decoding first (wav/mp3 containers)
+    // We clone the buffer because decodeAudioData detaches it
+    return await audioContext.decodeAudioData(bytes.buffer.slice(0));
+  } catch (e) {
+    // Fallback: Treat as raw PCM (16-bit, 24kHz default for Gemini TTS, or 16kHz)
+    // Assuming 24kHz Mono 16-bit Little Endian based on typical Gemini TTS raw output
+    const pcmData = new Int16Array(bytes.buffer);
+    const float32Data = new Float32Array(pcmData.length);
+    for (let i = 0; i < pcmData.length; i++) {
+      // Convert int16 to float32 (-1.0 to 1.0)
+      float32Data[i] = pcmData[i] / 32768.0;
+    }
+
+    // Create buffer: 1 channel, length, 24000 sample rate
+    const audioBuffer = audioContext.createBuffer(1, float32Data.length, 24000);
+    audioBuffer.getChannelData(0).set(float32Data);
+    return audioBuffer;
+  }
+};
+
+// Helper to check/request Veo key
+export const ensureVeoKey = async (): Promise<void> => {
+  // @ts-ignore
+  if (window.aistudio) {
+    // @ts-ignore
+    const hasKey = await window.aistudio.hasSelectedApiKey();
+    if (!hasKey) {
+      // @ts-ignore
+      await window.aistudio.openSelectKey();
+    }
+  }
+};
+
+const LANGUAGE_MAP
= { + en: "English", + ja: "Japanese", + zh: "Chinese (Simplified)" +}; + +class GeminiService { + private getAi() { + const userKey = localStorage.getItem(USER_API_KEY_STORAGE); + const userBaseUrl = localStorage.getItem(USER_BASE_URL_STORAGE); + const envKey = process.env.API_KEY; + const keyToUse = (userKey && userKey.trim().length > 0) ? userKey : envKey; + + if (!keyToUse) { + console.error("API_KEY is missing."); + throw new Error("API Key is missing"); + } + + const config: any = { apiKey: keyToUse }; + if (userBaseUrl && userBaseUrl.trim().length > 0) { + config.baseUrl = userBaseUrl.trim(); + } + + return new GoogleGenAI(config); + } + + private async getApiKey(): Promise { + const userKey = localStorage.getItem(USER_API_KEY_STORAGE); + const envKey = process.env.API_KEY; + const key = (userKey && userKey.trim().length > 0) ? userKey : envKey; + if (!key) throw new Error("No API Key available"); + return key; + } + + private async retryOperation(operation: () => Promise, retries = 3, delay = 1000): Promise { + try { + return await operation(); + } catch (error: any) { + const isOverloaded = + error?.status === 503 || + error?.response?.status === 503 || + error?.message?.includes('503') || + error?.message?.includes('overloaded'); + + if (isOverloaded && retries > 0) { + console.warn(`Model overloaded (503). Retrying...`); + await new Promise(resolve => setTimeout(resolve, delay)); + return this.retryOperation(operation, retries - 1, delay * 2); + } + throw error; + } + } + + // 1. Text Chat Response - Returns { text, model } + async generateTextResponse( + prompt: string, + imageBase64?: string, + useThinking: boolean = false, + language: Language = 'en', + modelOverride?: string, + aiSpeakingLanguage: 'ja' | 'native' = 'native' + ): Promise<{ text: string, model: string }> { + const ai = this.getAi(); + + let modelName = useThinking + ? 'gemini-3-pro-preview' + : (imageBase64 ? 'gemini-3-pro-preview' : (modelOverride || 'gemini-2.5-flash')); + + const targetLangName = LANGUAGE_MAP[language]; + const parts: any[] = []; + + if (imageBase64) { + parts.push({ + inlineData: { + mimeType: 'image/jpeg', + data: imageBase64 + } + }); + parts.push({ text: `Analyze this image in the context of learning Japanese. Explain in ${targetLangName}: ` + prompt }); + } else { + parts.push({ text: prompt }); + } + + let instruction = ""; + if (aiSpeakingLanguage === 'ja') { + instruction = `You are Sakura, a Japanese language tutor. + IMPORTANT: + - Respond primarily in Japanese (日本語) to help the user practice immersion. + - Only use ${targetLangName} for complex grammar explanations or if the user asks specifically for a translation. + - Keep the tone encouraging and natural.`; + } else { + instruction = `You are Sakura, a friendly, encouraging, and highly skilled Japanese language tutor. You help users learn vocabulary, grammar, listening, and speaking. You provide clear explanations, examples, and translations. + IMPORTANT: + - You are teaching Japanese. + - However, the user speaks ${targetLangName}. 
+ - Provide your explanations, translations, and feedback in ${targetLangName}.`; + } + + const config: any = { + systemInstruction: instruction, + }; + + if (useThinking) { + config.thinkingConfig = { thinkingBudget: 32768 }; + } + + return this.retryOperation(async () => { + const response = await ai.models.generateContent({ + model: modelName, + contents: { parts }, + config: config + }); + return { + text: response.text || "I apologize, I couldn't generate a response.", + model: modelName + }; + }); + } + + // Internal helper for single TTS chunk + private async _generateSpeechChunk(text: string): Promise { + const ai = this.getAi(); + return this.retryOperation(async () => { + try { + const response = await ai.models.generateContent({ + model: 'gemini-2.5-flash-preview-tts', + contents: [{ parts: [{ text }] }], + config: { + responseModalities: [Modality.AUDIO], + speechConfig: { + voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } }, + }, + }, + }); + return response.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data || null; + } catch (e) { + console.error("TTS Chunk Error", e); + return null; + } + }); + } + + async generateSpeech(text: string): Promise { + if (!text || !text.trim()) return null; + + const MAX_CHUNK_LENGTH = 250; // Safe limit to prevent network timeout on long generation + + // If text is short, process directly + if (text.length <= MAX_CHUNK_LENGTH) { + return this._generateSpeechChunk(text); + } + + // Split text into chunks by sentence to avoid breaking words + const regex = /[^。!?.!?\n]+[。!?.!?\n]*|[^。!?.!?\n]+$/g; + const sentences = text.match(regex) || [text]; + const chunks: string[] = []; + let currentChunk = ''; + + for (const sentence of sentences) { + if ((currentChunk + sentence).length > MAX_CHUNK_LENGTH) { + if (currentChunk) chunks.push(currentChunk); + currentChunk = sentence; + // Force split if a single sentence exceeds max length + while (currentChunk.length > MAX_CHUNK_LENGTH) { + chunks.push(currentChunk.slice(0, MAX_CHUNK_LENGTH)); + currentChunk = currentChunk.slice(MAX_CHUNK_LENGTH); + } + } else { + currentChunk += sentence; + } + } + if (currentChunk) chunks.push(currentChunk); + + try { + // Generate chunks in parallel to speed up total time + // Note: Promise.all order is preserved + const results = await Promise.all(chunks.map(chunk => this._generateSpeechChunk(chunk))); + + // If any chunk failed, the whole audio is compromised + if (results.some(r => r === null)) return null; + + // Convert Base64 -> Uint8Array + const audioSegments = results.map(r => base64ToUint8Array(r!)); + + // Concatenate raw PCM data + const totalLength = audioSegments.reduce((acc, cur) => acc + cur.length, 0); + const combined = new Uint8Array(totalLength); + let offset = 0; + for (const seg of audioSegments) { + combined.set(seg, offset); + offset += seg.length; + } + + // Convert back to Base64 for playback/storage + return uint8ArrayToBase64(combined); + } catch (e) { + console.error("TTS Assembly Error", e); + return null; + } + } + + async transcribeAudio(audioBase64: string): Promise { + const ai = this.getAi(); + return this.retryOperation(async () => { + const response = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { + parts: [ + { inlineData: { mimeType: 'audio/wav', data: audioBase64 } }, + { text: "Transcribe accurately." 
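+            // The inline WAV part above carries the speech; this text part is just the instruction.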
}, + ], + }, + }); + return response.text || ""; + }); + } + + async generateImage(prompt: string): Promise { + const ai = this.getAi(); + return this.retryOperation(async () => { + try { + const response = await ai.models.generateImages({ + model: 'imagen-4.0-generate-001', + prompt: prompt + " style of a japanese textbook illustration", + config: { numberOfImages: 1, outputMimeType: 'image/jpeg', aspectRatio: '1:1' }, + }); + const bytes = response.generatedImages?.[0]?.image?.imageBytes; + return bytes ? `data:image/jpeg;base64,${bytes}` : null; + } catch (e) { + console.error("Image Gen Error", e); + return null; + } + }); + } + + async editImage(base64Original: string, prompt: string): Promise { + const ai = this.getAi(); + return this.retryOperation(async () => { + try { + const cleanBase64 = base64Original.replace(/^data:image\/(png|jpeg|jpg|webp|heic|heif);base64,/i, ""); + const response = await ai.models.generateContent({ + model: 'gemini-2.5-flash-image', + contents: { + parts: [ + { inlineData: { data: cleanBase64, mimeType: 'image/jpeg' } }, + { text: prompt } + ] + }, + config: { responseModalities: [Modality.IMAGE] } + }); + for (const part of response.candidates?.[0]?.content?.parts || []) { + if (part.inlineData) return `data:image/png;base64,${part.inlineData.data}`; + } + return null; + } catch (e) { + console.error("Image Edit Error", e); + return null; + } + }); + } + + async generateVideo(prompt: string, onStatusUpdate: (status: string) => void): Promise { + await ensureVeoKey(); + const ai = this.getAi(); + try { + onStatusUpdate("Initializing Veo..."); + let operation = await ai.models.generateVideos({ + model: 'veo-3.1-fast-generate-preview', + prompt: prompt, + config: { numberOfVideos: 1, resolution: '720p', aspectRatio: '16:9' } + }); + onStatusUpdate("Dreaming up video..."); + while (!operation.done) { + await new Promise(resolve => setTimeout(resolve, 5000)); + operation = await ai.operations.getVideosOperation({ operation: operation }); + } + const videoUri = operation.response?.generatedVideos?.[0]?.video?.uri; + if (!videoUri) return null; + const apiKey = await this.getApiKey(); + const videoRes = await fetch(`${videoUri}&key=${apiKey}`); + const blob = await videoRes.blob(); + return URL.createObjectURL(blob); + } catch (e) { + console.error("Veo Error", e); + return null; + } + } + + async analyzeSpeakingPerformance(audioBase64: string, scenarioContext: string, historyContext: string, language: Language = 'en'): Promise { + const ai = this.getAi(); + const targetLangName = LANGUAGE_MAP[language]; + const prompt = `Roleplay: ${scenarioContext}. History: ${historyContext}. Listen, Transcribe, Reply, Evaluate (JSON). Translation/Advice in ${targetLangName}.`; + + return this.retryOperation(async () => { + const response = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { + parts: [{ inlineData: { mimeType: 'audio/wav', data: audioBase64 } }, { text: prompt }] + }, + config: { + responseMimeType: "application/json", + responseSchema: { + type: Type.OBJECT, + properties: { + transcription: { type: Type.STRING }, + response: { type: Type.STRING }, + translation: { type: Type.STRING }, + score: { type: Type.INTEGER }, + pronunciationIssues: { type: Type.ARRAY, items: { type: Type.STRING } }, + advice: { type: Type.STRING } + }, + required: ["transcription", "response", "translation", "score", "pronunciationIssues", "advice"] + } + } + }); + return response.text ? 
JSON.parse(response.text) : null; + }); + } + + async generateReadingLesson(topic: string, difficulty: ReadingDifficulty, language: Language): Promise { + const ai = this.getAi(); + const targetLangName = LANGUAGE_MAP[language]; + const prompt = `Create a complete Japanese reading lesson on "${topic}", level ${difficulty}. + The 'japaneseContent' MUST be a complete article or story (at least 300 characters). + Output JSON with title, japaneseContent, translation (${targetLangName}), vocabulary, and grammarPoints (list of key grammar used in the text with explanations).`; + + return this.retryOperation(async () => { + const response = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { parts: [{ text: prompt }] }, + config: { + responseMimeType: "application/json", + responseSchema: { + type: Type.OBJECT, + properties: { + title: { type: Type.STRING }, + japaneseContent: { type: Type.STRING }, + translation: { type: Type.STRING }, + vocabulary: { type: Type.ARRAY, items: { type: Type.OBJECT, properties: { word: { type: Type.STRING }, reading: { type: Type.STRING }, meaning: { type: Type.STRING } } } }, + grammarPoints: { type: Type.ARRAY, items: { type: Type.OBJECT, properties: { point: { type: Type.STRING }, explanation: { type: Type.STRING } } } } + }, + required: ["title", "japaneseContent", "translation", "vocabulary", "grammarPoints"] + } + } + }); + return response.text ? JSON.parse(response.text) : null; + }); + } + + async generateListeningLesson(topic: string, difficulty: ReadingDifficulty, language: Language): Promise { + const ai = this.getAi(); + const targetLangName = LANGUAGE_MAP[language]; + // Prompt asks for a conversation or monologue suitable for listening practice + const prompt = `Create a Japanese listening practice script on "${topic}", level ${difficulty}. It should be a conversation or monologue. + Output JSON with: + - title + - script (The full Japanese text of the conversation/monologue) + - translation (The full text in ${targetLangName}) + - vocabulary (Key words) + - questions (3 multiple choice comprehension questions in ${targetLangName}) + - Each question needs: question, options (array of 3 strings), correctIndex (0-2), explanation. + `; + + return this.retryOperation(async () => { + const response = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { parts: [{ text: prompt }] }, + config: { + responseMimeType: "application/json", + responseSchema: { + type: Type.OBJECT, + properties: { + title: { type: Type.STRING }, + script: { type: Type.STRING }, + translation: { type: Type.STRING }, + vocabulary: { type: Type.ARRAY, items: { type: Type.OBJECT, properties: { word: { type: Type.STRING }, reading: { type: Type.STRING }, meaning: { type: Type.STRING } } } }, + questions: { + type: Type.ARRAY, + items: { + type: Type.OBJECT, + properties: { + id: { type: Type.STRING }, + question: { type: Type.STRING }, + options: { type: Type.ARRAY, items: { type: Type.STRING } }, + correctIndex: { type: Type.INTEGER }, + explanation: { type: Type.STRING } + }, + required: ["question", "options", "correctIndex", "explanation"] + } + } + }, + required: ["title", "script", "translation", "vocabulary", "questions"] + } + } + }); + return response.text ? 
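+      // response.text can come back empty (e.g. a blocked response), so only parse when present.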
JSON.parse(response.text) : null; + }); + } + + async generateReadingTutorResponse(question: string, lesson: ReadingLesson | ListeningLesson, history: string, language: Language): Promise { + const ai = this.getAi(); + // Handle both ReadingLesson (japaneseContent) and ListeningLesson (script) + const content = 'japaneseContent' in lesson ? lesson.japaneseContent : lesson.script; + const prompt = `Tutor for text "${lesson.title}". Question: "${question}". History: ${history}. Explain in ${LANGUAGE_MAP[language]}.`; + return this.retryOperation(async () => { + const res = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { parts: [{ text: prompt }] } + }); + return res.text || ""; + }); + } + + async translateText(text: string, target: string, source: string = "Auto"): Promise { + const ai = this.getAi(); + return this.retryOperation(async () => { + const res = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { parts: [{ text: `Translate the following text from ${source} to ${target}.` }, { text: text }] }, + config: { + responseMimeType: "application/json", + responseSchema: { + type: Type.OBJECT, + properties: { translation: { type: Type.STRING } }, + required: ["translation"] + } + } + }); + return (res.text ? JSON.parse(res.text).translation : "") || ""; + }); + } + + async translateImage(base64: string, target: string, source: string = "Auto"): Promise<{ original: string; translated: string } | null> { + const ai = this.getAi(); + const cleanBase64 = base64.replace(/^data:image\/(png|jpeg|jpg|webp|heic|heif);base64,/i, ""); + return this.retryOperation(async () => { + const res = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { + parts: [{ inlineData: { mimeType: 'image/jpeg', data: cleanBase64 } }, { text: `Extract text (Language: ${source}) and translate to ${target}. JSON output: original, translated.` }] + }, + config: { + responseMimeType: "application/json", + responseSchema: { + type: Type.OBJECT, + properties: { original: { type: Type.STRING }, translated: { type: Type.STRING } }, + required: ["original", "translated"] + } + } + }); + return res.text ? JSON.parse(res.text) : null; + }); + } + + async extractAndAnalyzeText(base64: string, language: Language): Promise { + const ai = this.getAi(); + const cleanBase64 = base64.replace(/^data:image\/(png|jpeg|jpg|webp|heic|heif);base64,/i, ""); + const targetLang = LANGUAGE_MAP[language]; + const prompt = `OCR and analyze text. Explain in ${targetLang}. JSON: extractedText, detectedLanguage, summary, vocabulary, grammarPoints.`; + + return this.retryOperation(async () => { + const res = await ai.models.generateContent({ + model: 'gemini-2.5-flash', + contents: { + parts: [{ inlineData: { mimeType: 'image/jpeg', data: cleanBase64 } }, { text: prompt }] + }, + config: { + responseMimeType: "application/json", + responseSchema: { + type: Type.OBJECT, + properties: { + extractedText: { type: Type.STRING }, + detectedLanguage: { type: Type.STRING }, + summary: { type: Type.STRING }, + vocabulary: { type: Type.ARRAY, items: { type: Type.OBJECT, properties: { word: { type: Type.STRING }, reading: { type: Type.STRING }, meaning: { type: Type.STRING } } } }, + grammarPoints: { type: Type.ARRAY, items: { type: Type.OBJECT, properties: { point: { type: Type.STRING }, explanation: { type: Type.STRING } } } } + }, + required: ["extractedText", "detectedLanguage", "summary", "vocabulary", "grammarPoints"] + } + } + }); + return res.text ? 
JSON.parse(res.text) : null; + }); + } +} + +export const geminiService = new GeminiService(); \ No newline at end of file diff --git a/tailwind.config.js b/tailwind.config.js new file mode 100644 index 0000000..2ff5628 --- /dev/null +++ b/tailwind.config.js @@ -0,0 +1,14 @@ +/** @type {import('tailwindcss').Config} */ +export default { + content: [ + "./index.html", + "./src/**/*.{js,ts,jsx,tsx}", + "./**/*.{js,ts,jsx,tsx}" + ], + theme: { + extend: {}, + }, + plugins: [ + require('@tailwindcss/typography'), + ], +} \ No newline at end of file diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 0000000..c2da5b3 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + "strict": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + "noFallthroughCasesInSwitch": true + }, + "include": ["./**/*.ts", "./**/*.tsx"] +} \ No newline at end of file diff --git a/types.ts b/types.ts new file mode 100644 index 0000000..80963f2 --- /dev/null +++ b/types.ts @@ -0,0 +1,172 @@ + + +export enum Role { + USER = 'user', + MODEL = 'model', +} + +export enum MessageType { + TEXT = 'text', + AUDIO = 'audio', + IMAGE = 'image', + VIDEO = 'video', +} + +export interface ChatMessage { + id: string; + role: Role; + type: MessageType; + content: string; // Text content or base64/url for media + model?: string; // Model used for generation + metadata?: { + isThinking?: boolean; + audioUrl?: string; // For TTS playback or User recording + imageUrl?: string; + videoUrl?: string; + transcription?: string; // For audio inputs + }; + timestamp: number; +} + +// New Interface for Chat Sessions +export interface ChatSession { + id: string; + title: string; + messages: ChatMessage[]; + createdAt: number; + updatedAt: number; +} + +export enum AppMode { + CHAT = 'chat', + READING = 'reading', + LISTENING = 'listening', // New Listening Mode + SPEAKING = 'speaking', + CREATIVE = 'creative', + TRANSLATION = 'translation', + OCR = 'ocr', +} + +export type Language = 'en' | 'ja' | 'zh'; + +// Specific Gemini Models +export enum ModelNames { + TEXT_FAST = 'gemini-2.5-flash', + TEXT_REASONING = 'gemini-3-pro-preview', + TTS = 'gemini-2.5-flash-preview-tts', + IMAGE_GEN = 'imagen-4.0-generate-001', + IMAGE_EDIT = 'gemini-2.5-flash-image', // Nano Banana + VIDEO_GEN = 'veo-3.1-fast-generate-preview', + TRANSCRIPTION = 'gemini-2.5-flash', +} + +export const AVAILABLE_CHAT_MODELS = [ + { id: 'gemini-3-pro-preview', name: 'Gemini 3 Pro (Default - Best Reasoning)' }, + { id: 'gemini-2.5-flash', name: 'Gemini 2.5 Flash (Fast & Balanced)' } +]; + +// Speaking Mode Types +export interface PronunciationFeedback { + score: number; // 0-100 + transcription: string; + response: string; // AI Reply in Japanese + translation: string; // AI Reply in English/Native Lang + pronunciationIssues: string[]; // List of specific phoneme/pitch errors + advice: string; // General advice +} + +export interface Scenario { + id: string; + title: string; + icon: string; + description: string; + initialMessage: string; // What AI says first + initialTranslation?: string; // Translation of initial message + role: string; // Who the AI is +} + +// Reading Mode Types +export enum ReadingDifficulty { + 
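+ // Shared by the reading and listening generators; the string values are interpolated directly
+ // into the prompts (e.g. `level ${difficulty}`), so they double as human-readable labels.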
BEGINNER = 'beginner', // N5/N4 + INTERMEDIATE = 'intermediate', // N3/N2 + ADVANCED = 'advanced', // N1+ +} + +export interface ReadingLesson { + title: string; + japaneseContent: string; + translation: string; + vocabulary: { word: string; reading: string; meaning: string }[]; + grammarPoints?: { point: string; explanation: string }[]; +} + +export interface ReadingLessonRecord extends ReadingLesson { + id: string; + topic: string; + difficulty: ReadingDifficulty; + timestamp: number; + chatHistory?: ChatMessage[]; // Persist tutor chat +} + +// Listening Mode Types +export interface QuizQuestion { + id: string; + question: string; + options: string[]; + correctIndex: number; + explanation: string; +} + +export interface ListeningLesson { + title: string; + script: string; // The full Japanese text (initially hidden) + translation: string; + vocabulary: { word: string; reading: string; meaning: string }[]; + questions: QuizQuestion[]; +} + +export interface ListeningLessonRecord extends ListeningLesson { + id: string; + topic: string; + difficulty: ReadingDifficulty; + timestamp: number; + chatHistory?: ChatMessage[]; +} + +// OCR Mode Types +export interface OCRAnalysis { + extractedText: string; + detectedLanguage: string; + summary: string; + vocabulary: { word: string; reading: string; meaning: string }[]; + grammarPoints: { point: string; explanation: string }[]; +} + +export interface OCRRecord { + id: string; + timestamp: number; + imagePreview: string; + analysis: OCRAnalysis; +} + +// Translation Mode Types +export interface TranslationRecord { + id: string; + sourceText: string; + targetText: string; + sourceLang: string; // e.g. 'Detected Language' or 'English' + targetLang: string; // e.g. 'Japanese' + timestamp: number; +} + +// Backup Data Type +export interface AppDataBackup { + version: number; + createdAt: number; + language: Language; + chatSessions: ChatSession[]; + translationHistory: TranslationRecord[]; + readingHistory?: ReadingLessonRecord[]; + ocrHistory?: OCRRecord[]; + listeningHistory?: ListeningLessonRecord[]; +} \ No newline at end of file diff --git a/utils/audioUtils.ts b/utils/audioUtils.ts new file mode 100644 index 0000000..3f9419b --- /dev/null +++ b/utils/audioUtils.ts @@ -0,0 +1,80 @@ + +export const base64ToUint8Array = (base64: string) => { + const binaryString = atob(base64); + const len = binaryString.length; + const bytes = new Uint8Array(len); + for (let i = 0; i < len; i++) { + bytes[i] = binaryString.charCodeAt(i); + } + return bytes; +}; + +export const uint8ArrayToBase64 = (bytes: Uint8Array) => { + let binary = ''; + const len = bytes.byteLength; + for (let i = 0; i < len; i++) { + binary += String.fromCharCode(bytes[i]); + } + return btoa(binary); +}; + +export const triggerDownload = (blob: Blob, filename: string) => { + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = filename; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); +}; + +export const createWavFileFromPcm = (pcmData: Uint8Array, sampleRate: number = 24000, numChannels: number = 1): Blob => { + const header = new ArrayBuffer(44); + const view = new DataView(header); + + const writeString = (view: DataView, offset: number, string: string) => { + for (let i = 0; i < string.length; i++) { + view.setUint8(offset + i, string.charCodeAt(i)); + } + }; + + writeString(view, 0, 'RIFF'); + view.setUint32(4, 36 + pcmData.length, true); + writeString(view, 8, 'WAVE'); + 
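+ // The 'fmt ' sub-chunk written next describes the PCM stream at fixed byte offsets:
+ //   16: sub-chunk size (16 for PCM)        20: audio format (1 = uncompressed PCM)
+ //   22: channel count                      24: sample rate
+ //   28: byte rate = sampleRate * channels * 2 (two bytes per 16-bit sample)
+ //   32: block align = channels * 2         34: bits per sample (16)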
writeString(view, 12, 'fmt '); + view.setUint32(16, 16, true); + view.setUint16(20, 1, true); + view.setUint16(22, numChannels, true); + view.setUint32(24, sampleRate, true); + view.setUint32(28, sampleRate * numChannels * 2, true); + view.setUint16(32, numChannels * 2, true); + view.setUint16(34, 16, true); + writeString(view, 36, 'data'); + view.setUint32(40, pcmData.length, true); + + return new Blob([view, pcmData], { type: 'audio/wav' }); +}; + +export const processAndDownloadAudio = (base64Data: string, filename: string) => { + try { + // Check for RIFF header (WAV) + // Some base64 strings might have newlines, strip them if necessary, + // but generally atob handles it or we assume clean base64. + const binaryString = atob(base64Data.substring(0, 50).replace(/\s/g, '')); + const isWav = binaryString.startsWith('RIFF'); + + if (isWav) { + const bytes = base64ToUint8Array(base64Data); + const blob = new Blob([bytes], { type: 'audio/wav' }); + triggerDownload(blob, filename); + } else { + // Assume Raw PCM 24kHz 16-bit Mono (Gemini Flash TTS default) + const bytes = base64ToUint8Array(base64Data); + const blob = createWavFileFromPcm(bytes, 24000, 1); + triggerDownload(blob, filename); + } + } catch (e) { + console.error("Error downloading audio", e); + } +}; diff --git a/utils/localization.ts b/utils/localization.ts new file mode 100644 index 0000000..3efa0ba --- /dev/null +++ b/utils/localization.ts @@ -0,0 +1,926 @@ + + +import { Language, Scenario } from "../types"; + +export const getScenarios = (language: Language): Scenario[] => { + switch(language) { + case 'ja': + return [ + { + id: 'cafe_order', + title: 'カフェで注文', + icon: '☕', + description: '東京のカフェでコーヒーと軽食を注文する練習。', + initialMessage: 'いらっしゃいませ!ご注文はお決まりですか?', + role: '店員' + }, + { + id: 'train_station', + title: '駅での道案内', + icon: '🚄', + description: '駅員に行き先を尋ねる練習。', + initialMessage: 'はい、どうされましたか?', + role: '駅員' + }, + { + id: 'conbini', + title: 'コンビニでの買い物', + icon: '🏪', + description: 'コンビニで支払いをする練習。', + initialMessage: 'お弁当温めますか?', + role: '店員' + }, + { + id: 'hotel_checkin', + title: 'ホテルのチェックイン', + icon: '🏨', + description: 'ホテルのフロントでチェックインをする。', + initialMessage: 'いらっしゃいませ。チェックインでございますか?', + role: 'フロント係' + }, + { + id: 'immigration', + title: '入国審査', + icon: '🛂', + description: '空港の入国審査で質問に答える練習。', + initialMessage: '次の方どうぞ。パスポートを見せてください。', + role: '審査官' + }, + { + id: 'boarding', + title: '搭乗手続き', + icon: '✈️', + description: '搭乗ゲートでのやり取り。', + initialMessage: 'ご搭乗ありがとうございます。パスポートと搭乗券を拝見します。', + role: '地上係員' + } + ]; + case 'zh': + return [ + { + id: 'cafe_order', + title: '咖啡厅点单', + icon: '☕', + description: '练习在东京的咖啡厅点咖啡和小吃。', + initialMessage: 'いらっしゃいませ!ご注文はお決まりですか?', + initialTranslation: '欢迎光临!决定好要点什么了吗?', + role: '店员' + }, + { + id: 'train_station', + title: '车站问路', + icon: '🚄', + description: '练习询问车站工作人员路线。', + initialMessage: 'はい、どうされましたか?', + initialTranslation: '您好,有什么可以帮您的吗?', + role: '站务员' + }, + { + id: 'conbini', + title: '便利店购物', + icon: '🏪', + description: '练习在便利店结账。', + initialMessage: 'お弁当温めますか?', + initialTranslation: '便当需要加热吗?', + role: '店员' + }, + { + id: 'hotel_checkin', + title: '酒店入住', + icon: '🏨', + description: '在酒店前台办理入住手续。', + initialMessage: 'いらっしゃいませ。チェックインでございますか?', + initialTranslation: '欢迎光临。是办理入住吗?', + role: '前台接待' + }, + { + id: 'immigration', + title: '入境审查', + icon: '🛂', + description: '练习在机场回答入境审查官的提问。', + initialMessage: '次の方どうぞ。パスポートを見せてください。', + initialTranslation: '下一位。请出示您的护照。', + role: '审查官' + }, + { + id: 'boarding', + title: '登机手续', + icon: '✈️', + description: '练习登机口的对话。', 
+ initialMessage: 'ご搭乗ありがとうございます。パスポートと搭乗券を拝見します。', + initialTranslation: '感谢您的搭乘。请出示护照和登机牌。', + role: '地勤人员' + } + ]; + default: // en + return [ + { + id: 'cafe_order', + title: 'Ordering at a Cafe', + icon: '☕', + description: 'Practice ordering coffee and snacks at a cafe in Tokyo.', + initialMessage: 'いらっしゃいませ!ご注文はお決まりですか?', + initialTranslation: 'Welcome! Have you decided on your order?', + role: 'Barista' + }, + { + id: 'train_station', + title: 'Asking Directions', + icon: '🚄', + description: 'Practice asking a station attendant for help finding a platform.', + initialMessage: 'はい、どうされましたか?', + initialTranslation: 'Yes, how can I help you?', + role: 'Station Attendant' + }, + { + id: 'conbini', + title: 'Convenience Store', + icon: '🏪', + description: 'Buying items at a Konbini.', + initialMessage: 'お弁当温めますか?', + initialTranslation: 'Would you like your bento warmed up?', + role: 'Clerk' + }, + { + id: 'hotel_checkin', + title: 'Hotel Check-in', + icon: '🏨', + description: 'Checking into a hotel.', + initialMessage: 'いらっしゃいませ。チェックインでございますか?', + initialTranslation: 'Welcome. Are you checking in?', + role: 'Receptionist' + }, + { + id: 'immigration', + title: 'Immigration', + icon: '🛂', + description: 'Answering questions at airport immigration control.', + initialMessage: '次の方どうぞ。パスポートを見せてください。', + initialTranslation: 'Next person, please. Show me your passport.', + role: 'Officer' + }, + { + id: 'boarding', + title: 'Boarding Gate', + icon: '✈️', + description: 'Interacting with staff at the boarding gate.', + initialMessage: 'ご搭乗ありがとうございます。パスポートと搭乗券を拝見します。', + initialTranslation: 'Thank you for boarding. May I see your passport and boarding pass?', + role: 'Ground Staff' + } + ]; + } +}; + +export const translations = { + en: { + appTitle: "Sakura Sensei 🌸", + nav: { + sectionStudy: "Study & Input", + sectionPractice: "Practice & Output", + sectionTools: "Toolbox", + sectionImmersion: "Immersion", + chat: "Tutor Dojo", + reading: "Reading Hall", + listening: "Listening Lab", + speaking: "Roleplay", + creative: "Atelier", + translation: "Translator", + ocr: "Scanner", + settings: "Settings" + }, + common: { + cancel: "Cancel", + confirm: "Confirm", + delete: "Delete", + next: "Next", + generatedBy: "Generated by", + error: "Error occurred", + poweredBy: "Powered by Gemini", + deleteItemConfirm: "Are you sure you want to delete this item?", + clearHistoryConfirm: "Are you sure you want to clear the entire history?", + save: "Save", + download: "Download", + content: "Content", + tutor: "Tutor", + text: "Text", + explanation: "Explanation", + clear: "Clear", + copy: "Copy", + copied: "Copied!", + share: "Share", + shareImage: "Image", + shareText: "Text", + shareFile: "File (TXT)", + aiLanguage: "AI Language", + langJa: "Japanese", + langNative: "User Language", + today: "Today", + yesterday: "Yesterday" + }, + onboarding: { + welcome: "Welcome to Sakura Sensei!", + desc1: "Your AI-powered companion for mastering Japanese.", + step1Title: "Conversational Tutor", + step1Desc: "Chat with Sakura (Gemini 3 Pro) to practice grammar or ask cultural questions.", + step2Title: "Immersive Practice", + step2Desc: "Roleplay realistic scenarios, generate reading materials, and scan real-world text.", + step3Title: "Creative Tools", + step3Desc: "Generate images and videos to visualize your learning journey.", + startBtn: "Start Learning", + selectLang: "Select Interface Language" + }, + chat: { + welcome: "Konnichiwa! 🌸 I am Sakura. 
How can I help you with your Japanese today?", + inputPlaceholder: "Send a message...", + thinkingPlaceholder: "Reasoning about grammar...", + imageAttached: "Image attached", + sending: "Sakura is thinking...", + error: "Connection lost.", + locationError: "Region not supported. Please configure a Proxy URL in Settings.", + playUserAudio: "Play Recording", + listenPronunciation: "Listen", + deepThinking: "Deep Thought", + imageAnalyzed: "Image Analyzed", + thinkingToggle: "Thinking Mode", + newChat: "New Chat", + history: "Chat History", + noHistory: "No previous chats.", + deleteChat: "Delete", + deleteConfirm: "Are you sure you want to delete this chat session?", + untitled: "Untitled Chat", + transcribedPrefix: "(Transcribed): " + }, + creative: { + title: "Creative Atelier 🎨", + genImage: "Paint", + editImage: "Magic Edit", + genVideo: "Dream Video", + promptLabel: "Your Vision", + editLabel1: "1. Base Photo", + editLabel2: "2. Instruction", + uploadPlaceholder: "Drop an image here", + generateBtn: "Generate", + creatingBtn: "Creating...", + download: "Download", + videoWarning: "* Video generation (Veo) takes time.", + emptyState: "Your masterpiece will appear here", + imagePrompt: "Cyberpunk samurai cat in neon Tokyo...", + editPrompt: "Turn trees into cherry blossoms...", + videoPrompt: "Traditional tea ceremony in a futuristic garden...", + uploadAlert: "Please upload an image first!" + }, + speaking: { + title: "Conversation Dojo 🗣️", + subtitle: "Roleplay in realistic scenarios. Get instant feedback on accent and fluency.", + back: "Exit", + listening: "Listening...", + tapSpeak: "Tap to Speak", + processing: "Analyzing...", + feedbackTitle: "Sensei's Report", + score: "Fluency", + toImprove: "Corrections", + advice: "Advice", + transcription: "You Said", + meaning: "Meaning", + perfect: "Sugoi! Perfect pronunciation! 🎉", + emptyFeedback: "Speak clearly to get feedback.", + replay: "Replay", + start: "Start", + roleplay: "Role", + translation: "Translation" + }, + reading: { + title: "Reading Hall 📜", + subtitle: "Generate custom reading lessons based on your level.", + topicLabel: "Topic", + difficultyLabel: "Level", + levels: { + beginner: "Beginner (N5-N4)", + intermediate: "Intermediate (N3-N2)", + advanced: "Advanced (N1)" + }, + generate: "Create Lesson", + generating: "Writing...", + translationToggle: "Translation", + vocabTitle: "Vocabulary", + grammarHeader: "Grammar", + qaTitle: "Tutor Chat", + qaPlaceholder: "Ask about this text...", + qaWelcome: "Lesson generated. Ask me anything about the text!", + historyTitle: "Library", + loadMore: "Open", + emptyHistory: "Empty Library", + clear: "Clear", + placeholder: "e.g. Kyoto History, Anime Culture", + translationLabel: "Translation", + thinking: "thinking...", + playAudio: "Listen", + stopAudio: "Stop", + contentMissing: "No text generated. Please try creating a new lesson with a more specific topic.", + translationMissing: "No translation available." + }, + listening: { + title: "Listening Lab 🎧", + subtitle: "Train your ears with AI-generated conversations and quizzes.", + generate: "Create Practice", + generating: "Composing...", + play: "Play Audio", + pause: "Pause", + replay: "Replay", + showScript: "Show Transcript", + hideScript: "Hide Transcript", + quizTitle: "Comprehension Quiz", + check: "Check Answer", + correct: "Correct!", + incorrect: "Incorrect, try again.", + scriptTitle: "Transcript", + historyTitle: "Practice Log", + emptyHistory: "No practice logs", + qaWelcome: "I've generated a listening exercise. 
Listen to the audio first, try the quiz, then ask me anything!", + noScript: "No script available to play.", + scriptMissing: "No script generated. Please try generating again." + }, + ocr: { + title: "Text Scanner 🔍", + subtitle: "Scan text (books, menus) to create study guides.", + uploadBtn: "Upload", + cameraBtn: "Camera", + processing: "Scanning...", + extractedTitle: "Extracted Text", + analysisTitle: "Study Notes", + vocabHeader: "Vocabulary", + grammarHeader: "Grammar", + summaryHeader: "Summary", + chatPlaceholder: "Ask about this text...", + reScan: "New Scan", + error: "Could not analyze image.", + history: "Scan History", + emptyHistory: "No scans yet", + clear: "Clear", + analyzedIntro: "Analyzed (Language: $lang). Ask me anything!", + historyIntro: "Loaded from history (Language: $lang).", + tutorChat: "Tutor Chat", + thinking: "thinking...", + analysisFailed: "Analysis failed." + }, + translation: { + title: "Translator", + inputLabel: "Input", + outputLabel: "Translation", + translateBtn: "Translate", + translating: "Translating...", + extracting: "Scanning...", + scanImage: "Camera", + uploadImage: "Image", + sourceLang: "Source", + targetLang: "Target", + history: "Translator History", + clear: "Clear", + copy: "Copy", + langs: { + auto: "Auto Detect", + en: "English", + ja: "Japanese", + zh: "Chinese", + ko: "Korean", + fr: "French", + es: "Spanish" + }, + errorTranslating: "Error translating.", + imageReadError: "Could not read text from image.", + imageTransError: "Image translation failed." + }, + settings: { + title: "Settings & Data", + backupTitle: "Backup", + backupDesc: "Download all data locally.", + backupBtn: "Backup", + restoreDesc: "Restore from backup file.", + restoreBtn: "Restore", + exportTitle: "Export", + exportChatBtn: "Chat Log (TXT)", + exportTransBtn: "Translations (CSV)", + exportReadingBtn: "Reading History (JSON)", + exportOCRBtn: "Scan History (JSON)", + successRestore: "Restored successfully!", + errorRestore: "Invalid file.", + apiKeyTitle: "API Configuration", + apiKeyDesc: "Configure your Gemini API access.", + apiKeyPlaceholder: "Paste API Key", + baseUrlPlaceholder: "Base URL (Optional, e.g. for Proxy)", + apiKeyMissing: "API Key is required.", + saveKey: "Save", + removeKey: "Remove", + keySaved: "Settings saved!", + keyRemoved: "Settings cleared.", + modelTitle: "AI Model", + modelDesc: "Select model for chat/reasoning.", + modelSaved: "Model updated!" 
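+ // Note: ocr.analyzedIntro / ocr.historyIntro above carry a "$lang" placeholder rather than a
+ // template literal, so the consuming view has to substitute it at render time, e.g. (sketch only):
+ //   t.ocr.analyzedIntro.replace('$lang', analysis.detectedLanguage)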
+ }, + recorder: { + start: "Start Mic", + stop: "Stop Mic" + } + }, + ja: { + appTitle: "さくら先生 🌸", + nav: { + sectionStudy: "学習とインプット", + sectionPractice: "練習とアウトプット", + sectionTools: "ツールボックス", + sectionImmersion: "没入体験", + chat: "学習道場", + reading: "読書の間", + listening: "聴解ラボ", + speaking: "ロールプレイ", + creative: "アトリエ", + translation: "翻訳機", + ocr: "スキャナー", + settings: "設定" + }, + common: { + cancel: "キャンセル", + confirm: "確認", + delete: "削除", + next: "次へ", + generatedBy: "生成モデル:", + error: "エラーが発生しました", + poweredBy: "Powered by Gemini", + deleteItemConfirm: "この項目を削除してもよろしいですか?", + clearHistoryConfirm: "履歴をすべて消去してもよろしいですか?", + save: "保存", + download: "ダウンロード", + content: "コンテンツ", + tutor: "チューター", + text: "テキスト", + explanation: "解説", + clear: "クリア", + copy: "コピー", + copied: "コピーしました!", + share: "共有", + shareImage: "画像", + shareText: "テキスト", + shareFile: "ファイル (TXT)", + aiLanguage: "AIの使用言語", + langJa: "日本語", + langNative: "ユーザー言語", + today: "今日", + yesterday: "昨日" + }, + onboarding: { + welcome: "さくら先生へようこそ!", + desc1: "あなたのためのAI日本語学習パートナーです。", + step1Title: "会話チューター", + step1Desc: "さくら先生(Gemini 3 Pro)とチャットして、文法や文化について学びましょう。", + step2Title: "没入型練習", + step2Desc: "リアルなシナリオでのロールプレイ、読み物の作成、現実世界のテキストのスキャン。", + step3Title: "クリエイティブツール", + step3Desc: "学習の旅を視覚化するために画像やビデオを生成します。", + startBtn: "学習を始める", + selectLang: "言語を選択" + }, + chat: { + welcome: "こんにちは!🌸 さくらです。日本語の勉強をお手伝いします。", + inputPlaceholder: "メッセージを送信...", + thinkingPlaceholder: "文法を推論中...", + imageAttached: "画像が添付されました", + sending: "さくら先生が考え中...", + error: "接続が失われました。", + locationError: "この地域はサポートされていません。設定でプロキシURLを設定してください。", + playUserAudio: "録音を再生", + listenPronunciation: "聞く", + deepThinking: "深い思考", + imageAnalyzed: "画像を分析しました", + thinkingToggle: "思考モード", + newChat: "新しいチャット", + history: "チャット履歴", + noHistory: "履歴はありません。", + deleteChat: "削除", + deleteConfirm: "このチャットセッションを削除してもよろしいですか?", + untitled: "無題のチャット", + transcribedPrefix: "(書き起こし): " + }, + creative: { + title: "クリエイティブアトリエ 🎨", + genImage: "描画", + editImage: "マジック編集", + genVideo: "夢のビデオ", + promptLabel: "あなたのビジョン", + editLabel1: "1. 元の画像", + editLabel2: "2. 指示", + uploadPlaceholder: "ここに画像をドロップ", + generateBtn: "生成", + creatingBtn: "作成中...", + download: "ダウンロード", + videoWarning: "* ビデオ生成 (Veo) には時間がかかります。", + emptyState: "ここに作品が表示されます", + imagePrompt: "ネオン輝く東京のサイバーパンク侍猫...", + editPrompt: "木を桜に変えて...", + videoPrompt: "未来的な庭園での伝統的な茶道...", + uploadAlert: "まずは画像をアップロードしてください!" 
+ }, + speaking: { + title: "会話道場 🗣️", + subtitle: "リアルなシナリオでロールプレイ。アクセントや流暢さを即座にフィードバック。", + back: "終了", + listening: "聞いています...", + tapSpeak: "タップして話す", + processing: "分析中...", + feedbackTitle: "先生のレポート", + score: "流暢さ", + toImprove: "修正点", + advice: "アドバイス", + transcription: "あなたの発言", + meaning: "意味", + perfect: "すごい!完璧な発音です!🎉", + emptyFeedback: "はっきりと話してください。", + replay: "再生", + start: "開始", + roleplay: "役割", + translation: "翻訳" + }, + reading: { + title: "読書の間 📜", + subtitle: "レベルに合わせて読み物を生成します。", + topicLabel: "トピック", + difficultyLabel: "レベル", + levels: { + beginner: "初級 (N5-N4)", + intermediate: "中級 (N3-N2)", + advanced: "上級 (N1)" + }, + generate: "レッスン作成", + generating: "執筆中...", + translationToggle: "翻訳", + vocabTitle: "語彙", + grammarHeader: "文法", + qaTitle: "チューターチャット", + qaPlaceholder: "このテキストについて質問...", + qaWelcome: "レッスンを作成しました。テキストについて何でも聞いてください!", + historyTitle: "ライブラリ", + loadMore: "開く", + emptyHistory: "ライブラリは空です", + clear: "クリア", + placeholder: "例:京都の歴史、アニメ文化", + translationLabel: "翻訳", + thinking: "考え中...", + playAudio: "聞く", + stopAudio: "停止", + contentMissing: "コンテンツが生成されませんでした。新しいトピックで試してください。", + translationMissing: "翻訳がありません。" + }, + listening: { + title: "聴解ラボ 🎧", + subtitle: "AIが生成した会話とクイズで耳を鍛えましょう。", + generate: "練習を作成", + generating: "作成中...", + play: "音声を再生", + pause: "一時停止", + replay: "もう一度", + showScript: "スクリプトを表示", + hideScript: "スクリプトを隠す", + quizTitle: "理解度クイズ", + check: "答え合わせ", + correct: "正解!", + incorrect: "不正解、もう一度。", + scriptTitle: "スクリプト", + historyTitle: "練習ログ", + emptyHistory: "練習ログなし", + qaWelcome: "リスニング練習を作成しました。まず音声を聞いてクイズに挑戦し、その後何でも質問してください!", + noScript: "再生できるスクリプトがありません。", + scriptMissing: "スクリプトが生成されませんでした。もう一度試してください。" + }, + ocr: { + title: "テキストスキャナー 🔍", + subtitle: "テキスト(本、メニュー)をスキャンして学習ガイドを作成。", + uploadBtn: "アップロード", + cameraBtn: "カメラ", + processing: "スキャン中...", + extractedTitle: "抽出されたテキスト", + analysisTitle: "学習ノート", + vocabHeader: "語彙", + grammarHeader: "文法", + summaryHeader: "要約", + chatPlaceholder: "このテキストについて質問...", + reScan: "新しいスキャン", + error: "画像を分析できませんでした。", + history: "スキャン履歴", + emptyHistory: "スキャン履歴なし", + clear: "クリア", + analyzedIntro: "分析しました(言語:$lang)。何でも聞いてください!", + historyIntro: "履歴から読み込みました(言語:$lang)。", + tutorChat: "チューターチャット", + thinking: "考え中...", + analysisFailed: "分析に失敗しました。" + }, + translation: { + title: "翻訳機", + inputLabel: "入力", + outputLabel: "翻訳", + translateBtn: "翻訳", + translating: "翻訳中...", + extracting: "スキャン中...", + scanImage: "カメラ", + uploadImage: "画像", + sourceLang: "翻訳元", + targetLang: "翻訳先", + history: "翻訳履歴", + clear: "クリア", + copy: "コピー", + langs: { + auto: "自動検出", + en: "英語", + ja: "日本語", + zh: "中国語", + ko: "韓国語", + fr: "フランス語", + es: "スペイン語" + }, + errorTranslating: "翻訳エラー。", + imageReadError: "テキストを読み取れませんでした。", + imageTransError: "画像の翻訳に失敗しました。" + }, + settings: { + title: "設定とデータ", + backupTitle: "バックアップ", + backupDesc: "すべてのデータをローカルにダウンロード。", + backupBtn: "バックアップ", + restoreDesc: "バックアップファイルから復元。", + restoreBtn: "復元", + exportTitle: "エクスポート", + exportChatBtn: "チャットログ (TXT)", + exportTransBtn: "翻訳 (CSV)", + exportReadingBtn: "読書履歴 (JSON)", + exportOCRBtn: "スキャン履歴 (JSON)", + successRestore: "正常に復元されました!", + errorRestore: "無効なファイルです。", + apiKeyTitle: "API構成", + apiKeyDesc: "Gemini APIアクセスを構成します。", + apiKeyPlaceholder: "APIキーを貼り付け", + baseUrlPlaceholder: "ベースURL (オプション、プロキシ用)", + apiKeyMissing: "APIキーが必要です。", + saveKey: "保存", + removeKey: "削除", + keySaved: "設定を保存しました!", + keyRemoved: "設定をクリアしました。", + modelTitle: "AIモデル", + modelDesc: "チャット/推論用のモデルを選択。", + modelSaved: "モデルを更新しました!" 
+    },
+    recorder: {
+      start: "マイク開始",
+      stop: "マイク停止"
+    }
+  },
+  zh: {
+    appTitle: "樱花老师 🌸",
+    nav: {
+      sectionStudy: "学习与输入",
+      sectionPractice: "练习与输出",
+      sectionTools: "工具箱",
+      sectionImmersion: "沉浸体验",
+      chat: "学习道场",
+      reading: "阅读室",
+      listening: "听力实验室",
+      speaking: "角色扮演",
+      creative: "工作室",
+      translation: "翻译机",
+      ocr: "扫描仪",
+      settings: "设置"
+    },
+    common: {
+      cancel: "取消",
+      confirm: "确认",
+      delete: "删除",
+      next: "下一步",
+      generatedBy: "生成模型:",
+      error: "发生错误",
+      poweredBy: "Powered by Gemini",
+      deleteItemConfirm: "您确定要删除此项目吗?",
+      clearHistoryConfirm: "您确定要清空历史记录吗?",
+      save: "保存",
+      download: "下载",
+      content: "内容",
+      tutor: "导师",
+      text: "文本",
+      explanation: "解析",
+      clear: "清除",
+      copy: "复制",
+      copied: "已复制!",
+      share: "分享",
+      shareImage: "图片",
+      shareText: "文本",
+      shareFile: "文件 (TXT)",
+      aiLanguage: "AI使用语言",
+      langJa: "日语",
+      langNative: "用户语言",
+      today: "今天",
+      yesterday: "昨天"
+    },
+    onboarding: {
+      welcome: "欢迎来到樱花老师!",
+      desc1: "您的AI日语学习伙伴。",
+      step1Title: "对话导师",
+      step1Desc: "与樱花老师(Gemini 3 Pro)聊天,学习语法或文化。",
+      step2Title: "沉浸式练习",
+      step2Desc: "角色扮演现实场景,生成阅读材料,扫描现实世界的文本。",
+      step3Title: "创意工具",
+      step3Desc: "生成图像和视频以可视化您的学习之旅。",
+      startBtn: "开始学习",
+      selectLang: "选择界面语言"
+    },
+    chat: {
+      welcome: "你好!🌸 我是樱花。今天我可以帮你学习日语吗?",
+      inputPlaceholder: "发送消息...",
+      thinkingPlaceholder: "正在推理语法...",
+      imageAttached: "已附上图片",
+      sending: "樱花老师正在思考...",
+      error: "连接丢失。",
+      locationError: "不支持该地区。请在设置中配置代理URL。",
+      playUserAudio: "播放录音",
+      listenPronunciation: "听",
+      deepThinking: "深度思考",
+      imageAnalyzed: "图像已分析",
+      thinkingToggle: "思考模式",
+      newChat: "新聊天",
+      history: "聊天记录",
+      noHistory: "没有以前的聊天。",
+      deleteChat: "删除",
+      deleteConfirm: "您确定要删除此聊天会话吗?",
+      untitled: "未命名聊天",
+      transcribedPrefix: "(转录): "
+    },
+    creative: {
+      title: "创意工作室 🎨",
+      genImage: "绘画",
+      editImage: "魔法编辑",
+      genVideo: "梦境视频",
+      promptLabel: "你的愿景",
+      editLabel1: "1. 基础照片",
+      editLabel2: "2. 指令",
+      uploadPlaceholder: "在这里拖放图像",
+      generateBtn: "生成",
+      creatingBtn: "正在创建...",
+      download: "下载",
+      videoWarning: "* 视频生成 (Veo) 需要时间。",
+      emptyState: "你的杰作将出现在这里",
+      imagePrompt: "霓虹灯闪烁的东京赛博朋克武士猫...",
+      editPrompt: "把树变成樱花...",
+      videoPrompt: "未来花园中的传统茶道...",
+      uploadAlert: "请先上传图片!"
+ }, + speaking: { + title: "对话道场 🗣️", + subtitle: "在现实场景中进行角色扮演。即时反馈口音和流利度。", + back: "退出", + listening: "正在听...", + tapSpeak: "点击说话", + processing: "正在分析...", + feedbackTitle: "老师的报告", + score: "流利度", + toImprove: "修正", + advice: "建议", + transcription: "你说了", + meaning: "意思", + perfect: "太棒了!完美的发音!🎉", + emptyFeedback: "请清楚地说出以获得反馈。", + replay: "重播", + start: "开始", + roleplay: "角色", + translation: "翻译" + }, + reading: { + title: "阅读室 📜", + subtitle: "根据您的水平生成自定义阅读课程。", + topicLabel: "主题", + difficultyLabel: "等级", + levels: { + beginner: "初级 (N5-N4)", + intermediate: "中级 (N3-N2)", + advanced: "高级 (N1)" + }, + generate: "创建课程", + generating: "正在写作...", + translationToggle: "翻译", + vocabTitle: "词汇", + grammarHeader: "语法", + qaTitle: "导师聊天", + qaPlaceholder: "关于此文本的问题...", + qaWelcome: "课程已生成。关于文本的问题尽管问我!", + historyTitle: "图书馆", + loadMore: "打开", + emptyHistory: "图书馆为空", + clear: "清除", + placeholder: "例如:京都历史,动漫文化", + translationLabel: "翻译", + thinking: "思考中...", + playAudio: "听", + stopAudio: "停止", + contentMissing: "未生成内容。请尝试新的主题。", + translationMissing: "暂无翻译。" + }, + listening: { + title: "听力实验室 🎧", + subtitle: "通过AI生成的对话和测验训练您的耳朵。", + generate: "创建练习", + generating: "正在创作...", + play: "播放音频", + pause: "暂停", + replay: "重播", + showScript: "显示脚本", + hideScript: "隐藏脚本", + quizTitle: "理解测验", + check: "检查答案", + correct: "正确!", + incorrect: "不正确,请重试。", + scriptTitle: "脚本", + historyTitle: "练习日志", + emptyHistory: "暂无练习记录", + qaWelcome: "我已生成听力练习。先听音频,尝试测验,然后尽管问我任何问题!", + noScript: "暂无脚本可播放。", + scriptMissing: "未生成脚本。请重试。" + }, + ocr: { + title: "文本扫描仪 🔍", + subtitle: "扫描文本(书籍,菜单)以创建学习指南。", + uploadBtn: "上传", + cameraBtn: "相机", + processing: "正在扫描...", + extractedTitle: "提取的文本", + analysisTitle: "学习笔记", + vocabHeader: "词汇", + grammarHeader: "语法", + summaryHeader: "摘要", + chatPlaceholder: "关于此文本的问题...", + reScan: "新扫描", + error: "无法分析图像。", + history: "扫描记录", + emptyHistory: "暂无扫描", + clear: "清除", + analyzedIntro: "已分析(语言:$lang)。尽管问我!", + historyIntro: "从历史记录加载(语言:$lang)。", + tutorChat: "导师聊天", + thinking: "思考中...", + analysisFailed: "分析失败。" + }, + translation: { + title: "翻译机", + inputLabel: "输入", + outputLabel: "翻译", + translateBtn: "翻译", + translating: "正在翻译...", + extracting: "正在扫描...", + scanImage: "相机", + uploadImage: "图像", + sourceLang: "源语言", + targetLang: "目标语言", + history: "翻译记录", + clear: "清除", + copy: "复制", + langs: { + auto: "自动检测", + en: "英语", + ja: "日语", + zh: "中文", + ko: "韩语", + fr: "法语", + es: "西班牙语" + }, + errorTranslating: "翻译错误。", + imageReadError: "无法读取文本。", + imageTransError: "图片翻译失败。" + }, + settings: { + title: "设置和数据", + backupTitle: "备份", + backupDesc: "下载所有数据到本地。", + backupBtn: "备份", + restoreDesc: "从备份文件恢复。", + restoreBtn: "恢复", + exportTitle: "导出", + exportChatBtn: "聊天记录 (TXT)", + exportTransBtn: "翻译 (CSV)", + exportReadingBtn: "阅读记录 (JSON)", + exportOCRBtn: "扫描记录 (JSON)", + successRestore: "恢复成功!", + errorRestore: "无效文件。", + apiKeyTitle: "API配置", + apiKeyDesc: "配置您的Gemini API访问。", + apiKeyPlaceholder: "粘贴API密钥", + baseUrlPlaceholder: "Base URL (可选,用于代理)", + apiKeyMissing: "需要API密钥。", + saveKey: "保存", + removeKey: "删除", + keySaved: "设置已保存!", + keyRemoved: "设置已清除。", + modelTitle: "AI模型", + modelDesc: "选择聊天/推理模型。", + modelSaved: "模型已更新!" 
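+ // The three locales (en / ja / zh) must expose the same key tree: views look strings up via
+ // translations[language].section.key, so the lookups only type-check while all locales stay
+ // structurally in sync.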
+    },
+    recorder: {
+      start: "开始录音",
+      stop: "停止录音"
+    }
+  }
+};
\ No newline at end of file
diff --git a/views/ChatView.tsx b/views/ChatView.tsx
new file mode 100644
index 0000000..dc5c2c9
--- /dev/null
+++ b/views/ChatView.tsx
@@ -0,0 +1,546 @@
+
+
+import React, { useState, useRef, useEffect } from 'react';
+import { ChatMessage, Role, MessageType, Language, ChatSession } from '../types';
+import { geminiService } from '../services/geminiService';
+import ChatBubble from '../components/ChatBubble';
+import AudioRecorder from '../components/AudioRecorder';
+import { Send, Image as ImageIcon, BrainCircuit, Loader2, Plus, History, MessageSquare, Trash2, X, Sparkles, PanelRightClose, PanelRightOpen, Share2, Download, FileText, Image as ImageIconLucide, Languages } from 'lucide-react';
+import { translations } from '../utils/localization';
+import html2canvas from 'html2canvas';
+
+interface ChatViewProps {
+  language: Language;
+  sessions: ChatSession[];
+  activeSessionId: string;
+  onNewSession: () => void;
+  onSelectSession: (id: string) => void;
+  onDeleteSession: (id: string) => void;
+  onClearAllSessions: () => void;
+  onUpdateSession: (id: string, messages: ChatMessage[]) => void;
+  selectedModel?: string;
+  addToast: (type: 'success' | 'error' | 'info', msg: string) => void;
+}
+
+const ChatView: React.FC<ChatViewProps> = ({
+  language,
+  sessions,
+  activeSessionId,
+  onNewSession,
+  onSelectSession,
+  onDeleteSession,
+  onClearAllSessions,
+  onUpdateSession,
+  selectedModel,
+  addToast
+}) => {
+  const t = translations[language].chat;
+  const tCommon = translations[language].common;
+
+  const activeSession = sessions.find(s => s.id === activeSessionId) || sessions[0];
+  const messages = activeSession ? activeSession.messages : [];
+
+  const [inputValue, setInputValue] = useState('');
+  const [isLoading, setIsLoading] = useState(false);
+  const [useThinking, setUseThinking] = useState(false);
+  const [attachedImage, setAttachedImage] = useState<string | null>(null);
+
+  // Settings State
+  const [aiSpeakingLanguage, setAiSpeakingLanguage] = useState<'ja' | 'native'>('ja');
+  const [isShareMenuOpen, setIsShareMenuOpen] = useState(false);
+
+  // History sidebar state (closed by default)
+  const [isHistoryOpen, setIsHistoryOpen] = useState(false);
+
+  const messagesEndRef = useRef<HTMLDivElement | null>(null);
+  const messagesContainerRef = useRef<HTMLDivElement | null>(null);
+  const fileInputRef = useRef<HTMLInputElement | null>(null);
+
+  const scrollToBottom = () => {
+    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
+  };
+
+  useEffect(() => {
+    scrollToBottom();
+  }, [messages, activeSessionId]);
+
+  // Close share menu on click outside
+  useEffect(() => {
+    const handleClick = () => setIsShareMenuOpen(false);
+    if (isShareMenuOpen) window.addEventListener('click', handleClick);
+    return () => window.removeEventListener('click', handleClick);
+  }, [isShareMenuOpen]);
+
+  const handleUpdateMessage = (updatedMsg: ChatMessage) => {
+    const updatedMessages = messages.map(m => m.id === updatedMsg.id ? updatedMsg : m);
+    onUpdateSession(activeSessionId, updatedMessages);
+  };
+
+  const handleSendMessage = async () => {
+    if ((!inputValue.trim() && !attachedImage) || isLoading) return;
+
+    const currentText = inputValue;
+    const currentImage = attachedImage;
+
+    setInputValue('');
+    setAttachedImage(null);
+    setIsLoading(true);
+
+    // 1. Construct User Message
+    const userMsg: ChatMessage = {
+      id: Date.now().toString(),
+      role: Role.USER,
+      type: MessageType.TEXT,
+      content: currentText,
+      timestamp: Date.now(),
+      metadata: { imageUrl: currentImage || undefined }
+    };
+
+    // IMPORTANT: Calculate new history locally to avoid stale closure issues after await
+    const messagesWithUser = [...messages, userMsg];
+
+    // Update UI immediately with user message
+    onUpdateSession(activeSessionId, messagesWithUser);
+
+    try {
+      // 2. Get Response
+      const result = await geminiService.generateTextResponse(
+        currentText || "Describe this image",
+        currentImage || undefined,
+        useThinking,
+        language,
+        selectedModel,
+        aiSpeakingLanguage
+      );
+
+      // 3. TTS (if short and not thinking)
+      let ttsAudio: string | null = null;
+      if (!useThinking && result.text.length < 300) {
+        try { ttsAudio = await geminiService.generateSpeech(result.text); } catch (e) { /* TTS is best-effort; keep the text reply */ }
+      }
+
+      const aiMsg: ChatMessage = {
+        id: (Date.now() + 1).toString(),
+        role: Role.MODEL,
+        type: MessageType.TEXT,
+        content: result.text,
+        model: result.model,
+        timestamp: Date.now(),
+        metadata: { isThinking: useThinking, audioUrl: ttsAudio || undefined }
+      };
+
+      // 4. Add AI Message to the LOCALLY calculated history (messagesWithUser)
+      // This ensures we don't lose the user message we just added
+      onUpdateSession(activeSessionId, [...messagesWithUser, aiMsg]);
+
+    } catch (error: any) {
+      const errorMsg = error?.message || t.error;
+      const errorMsgObj: ChatMessage = {
+        id: Date.now().toString(),
+        role: Role.MODEL,
+        type: MessageType.TEXT,
+        content: `${t.error}\n(${errorMsg})`,
+        timestamp: Date.now()
+      };
+      onUpdateSession(activeSessionId, [...messagesWithUser, errorMsgObj]);
+    } finally {
+      setIsLoading(false);
+      setUseThinking(false);
+    }
+  };
+
+  const handleAudioInput = async (base64Audio: string) => {
+    setIsLoading(true);
+    try {
+      // 1. Transcribe first (async)
+      const transcription = await geminiService.transcribeAudio(base64Audio);
+
+      const userMsg: ChatMessage = {
+        id: Date.now().toString(),
+        role: Role.USER,
+        type: MessageType.AUDIO,
+        content: `${t.transcribedPrefix}${transcription}`,
+        timestamp: Date.now(),
+        metadata: { audioUrl: base64Audio, transcription: transcription }
+      };
+
+      // 2. Update UI with User Message
+      const messagesWithUser = [...messages, userMsg];
+      onUpdateSession(activeSessionId, messagesWithUser);
+
+      // 3. Generate AI Response
+      const result = await geminiService.generateTextResponse(transcription, undefined, false, language, selectedModel, aiSpeakingLanguage);
+      const ttsAudio = await geminiService.generateSpeech(result.text);
+
+      const aiMsg: ChatMessage = {
+        id: (Date.now() + 1).toString(),
+        role: Role.MODEL,
+        type: MessageType.TEXT,
+        content: result.text,
+        model: result.model,
+        timestamp: Date.now(),
+        metadata: { audioUrl: ttsAudio || undefined }
+      };
+
+      // 4. Update UI with AI Message using local history
+      onUpdateSession(activeSessionId, [...messagesWithUser, aiMsg]);
+
+    } catch (e) {
+      console.error(e);
+      addToast('error', t.error);
+    } finally {
+      setIsLoading(false);
+    }
+  };
+
+  const handleImageUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
+    const file = e.target.files?.[0];
+    if (file) {
+      const reader = new FileReader();
+      reader.onloadend = () => {
+        setAttachedImage(reader.result as string);
+      };
+      reader.readAsDataURL(file);
+    }
+  };
+
+  // Share Handlers
+  const shareAsText = () => {
+    const text = messages.map(m => `[${new Date(m.timestamp).toLocaleString()}] ${m.role === Role.USER ?
'User' : 'Sakura'}: ${m.content}`).join('\n\n'); + navigator.clipboard.writeText(text); + addToast('success', tCommon.copied); + }; + + const shareAsFile = () => { + const text = messages.map(m => `[${new Date(m.timestamp).toLocaleString()}] ${m.role === Role.USER ? 'User' : 'Sakura'}: ${m.content}`).join('\n\n'); + const blob = new Blob([text], { type: 'text/plain' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `sakura_chat_${Date.now()}.txt`; + a.click(); + URL.revokeObjectURL(url); + }; + + const shareAsImage = async () => { + if (!messagesContainerRef.current) return; + addToast('info', 'Generating image...'); + + // Clone the element to capture full content + const original = messagesContainerRef.current; + const clone = original.cloneNode(true) as HTMLElement; + + // We need to maintain the width to ensure text wrapping is identical + const width = original.offsetWidth; + + clone.style.width = `${width}px`; + clone.style.height = 'auto'; + clone.style.maxHeight = 'none'; + clone.style.overflow = 'visible'; + clone.style.position = 'absolute'; + clone.style.top = '-9999px'; + clone.style.left = '0'; + clone.style.background = '#f8fafc'; // Match bg-slate-50 + clone.style.zIndex = '-1'; + + document.body.appendChild(clone); + + try { + // Small delay to ensure DOM rendering + await new Promise(resolve => setTimeout(resolve, 100)); + + const canvas = await html2canvas(clone, { + useCORS: true, + scale: 2, // Higher res + backgroundColor: '#f8fafc', + windowWidth: width, + height: clone.scrollHeight, + windowHeight: clone.scrollHeight + }); + + const url = canvas.toDataURL('image/png'); + const a = document.createElement('a'); + a.href = url; + a.download = `sakura_chat_${Date.now()}.png`; + a.click(); + } catch (e) { + console.error(e); + addToast('error', 'Failed to generate image'); + } finally { + if (document.body.contains(clone)) { + document.body.removeChild(clone); + } + } + }; + + // --- Sub-components --- + + const HistoryContent = () => ( +
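+  // Renders the collapsible history sidebar: a header with a clear-all action, a "new chat"
+  // button, and the session list sorted by most recent update (delete is per-row).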
+    <div className="flex h-full flex-col">
+      <div className="flex items-center justify-between border-b p-4">
+        <div className="flex items-center gap-2 font-semibold">
+          <History size={18} />
+          {t.history}
+        </div>
+        <div className="flex items-center gap-1">
+          {sessions.length > 0 && (
+            <button onClick={onClearAllSessions} title={tCommon.clear}>
+              <Trash2 size={16} />
+            </button>
+          )}
+          <button onClick={() => setIsHistoryOpen(false)} className="md:hidden">
+            <X size={16} />
+          </button>
+        </div>
+      </div>
+
+      <div className="p-3">
+        <button onClick={onNewSession} className="flex w-full items-center justify-center gap-2 rounded-lg border p-2">
+          <Plus size={16} />
+          {t.newChat}
+        </button>
+      </div>
+
+      <div className="flex-1 overflow-y-auto">
+        {sessions.length === 0 && <div className="p-4 text-sm">{t.noHistory}</div>}
+        {sessions.slice().sort((a,b) => b.updatedAt - a.updatedAt).map(session => (
+          <div
+            key={session.id}
+            className={`flex cursor-pointer items-start gap-2 p-3 ${session.id === activeSessionId ? 'bg-slate-100' : ''}`}
+            onClick={() => { onSelectSession(session.id); if(window.innerWidth < 768) setIsHistoryOpen(false); }}
+          >
+            {/* Icon */}
+            <div className="mt-1 shrink-0">
+              <MessageSquare size={16} />
+            </div>
+
+            {/* Content */}
+            <div className="min-w-0 flex-1">
+              <div className="truncate font-medium">
+                {session.title || t.untitled}
+              </div>
+              <div className="text-xs">
+                {new Date(session.updatedAt).toLocaleDateString()} {new Date(session.updatedAt).toLocaleTimeString([], {hour: '2-digit', minute:'2-digit'})}
+              </div>
+              <div className="truncate text-xs">
+                {session.messages.length > 1 ? session.messages[session.messages.length-1].content.substring(0, 50) : '...'}
+              </div>
+            </div>
+
+            {/* Delete Button */}
+            <button onClick={(e) => { e.stopPropagation(); onDeleteSession(session.id); }} title={t.deleteChat}>
+              <Trash2 size={14} />
+            </button>
+          </div>
+        ))}
+      </div>
+    </div>
+  );
+
+  return (
+    <div className="flex h-full">
+
+      {/* MAIN CHAT AREA */}
+      <div className="flex min-w-0 flex-1 flex-col">
+
+        {/* Header / Toolbar */}
+        <div className="flex items-center justify-between border-b p-3">
+          <div className="flex items-center gap-2">
+            <div className="flex items-center gap-1 rounded-full bg-slate-100 px-3 py-1 text-xs">
+              <Sparkles size={14} />
+              {selectedModel ? selectedModel.replace('gemini-', '').replace('-preview', '') : 'AI'}
+            </div>
+
+            {/* AI Language Toggle */}
+            <button
+              onClick={() => setAiSpeakingLanguage(prev => prev === 'ja' ? 'native' : 'ja')}
+              title={tCommon.aiLanguage}
+              className="flex items-center gap-1 rounded-full border px-3 py-1 text-xs"
+            >
+              <Languages size={14} />
+              {aiSpeakingLanguage === 'ja' ? tCommon.langJa : tCommon.langNative}
+            </button>
+          </div>
+
+          <div className="flex items-center gap-2">
+            {/* Share Button */}
+            <div className="relative">
+              <button onClick={(e) => { e.stopPropagation(); setIsShareMenuOpen(open => !open); }}>
+                <Share2 size={16} />
+              </button>
+
+              {/* Share Dropdown */}
+              {isShareMenuOpen && (
+                <div className="absolute right-0 z-10 mt-2 rounded-lg border bg-white shadow-lg">
+                  <button onClick={shareAsImage}><ImageIconLucide size={14} /> {tCommon.shareImage}</button>
+                  <button onClick={shareAsText}><FileText size={14} /> {tCommon.shareText}</button>
+                  <button onClick={shareAsFile}><Download size={14} /> {tCommon.shareFile}</button>
+                </div>
+              )}
+            </div>
+
+            {/* Toggle History Button */}
+            <button onClick={() => setIsHistoryOpen(open => !open)}>
+              {isHistoryOpen ? <PanelRightClose size={16} /> : <PanelRightOpen size={16} />}
+            </button>
+          </div>
+        </div>
+
+        {/* Messages Scroll Area */}
+        <div ref={messagesContainerRef} className="flex-1 overflow-y-auto bg-slate-50 p-4">
+          {messages.length === 0 && (
+            <div className="flex h-full flex-col items-center justify-center">
+              <Sparkles size={36} />
+              <div className="mt-2 text-sm">
+                {t.inputPlaceholder}
+              </div>
+            </div>
+          )}
+          {messages.map((msg) => (
+            <ChatBubble
+              key={msg.id}
+              message={msg}
+              language={language}
+              onUpdateMessage={handleUpdateMessage}
+              onError={(errorMsg) => addToast('error', errorMsg)}
+            />
+          ))}
+          {isLoading && (
+            <div className="flex items-center gap-2 text-sm">
+              <Loader2 size={16} className="animate-spin" />
+              {t.sending}
+            </div>
+          )}
+          <div ref={messagesEndRef} />
+        </div>

+        {/* Input Area */}
+        <div className="border-t p-3">
+          {attachedImage && (
+            <div className="mb-2 flex items-center gap-2">
+              <img src={attachedImage} alt="Preview" className="h-12 w-12 rounded object-cover" />
+              <button onClick={() => setAttachedImage(null)}>
+                <X size={14} />
+              </button>
+              <span className="text-xs">{t.imageAttached}</span>
+            </div>
+          )}
+
+          <div className="flex items-center gap-2">
+            <input ref={fileInputRef} type="file" accept="image/*" className="hidden" onChange={handleImageUpload} />
+            <button onClick={() => fileInputRef.current?.click()}>
+              <ImageIcon size={18} />
+            </button>
+            <button onClick={() => setUseThinking(v => !v)} title={t.thinkingToggle}>
+              <BrainCircuit size={18} />
+            </button>
+            <AudioRecorder onRecordingComplete={handleAudioInput} language={language} />
+            <input
+              value={inputValue}
+              onChange={(e) => setInputValue(e.target.value)}
+              onKeyDown={(e) => { if (e.key === 'Enter' && !e.shiftKey) { e.preventDefault(); handleSendMessage(); } }}
+              placeholder={useThinking ? t.thinkingPlaceholder : t.inputPlaceholder}
+              className="flex-1 rounded-lg border px-3 py-2"
+              disabled={isLoading}
+            />
+            <button onClick={handleSendMessage} disabled={isLoading}>
+              <Send size={18} />
+            </button>
+          </div>
+        </div>