diff --git a/components/ErrorBoundary.tsx b/components/ErrorBoundary.tsx
index 0e8c77a..98590f8 100644
--- a/components/ErrorBoundary.tsx
+++ b/components/ErrorBoundary.tsx
@@ -11,13 +11,10 @@ interface State {
 }
 
 export class ErrorBoundary extends Component {
-  constructor(props: Props) {
-    super(props);
-    this.state = {
-      hasError: false,
-      error: null
-    };
-  }
+  public state: State = {
+    hasError: false,
+    error: null
+  };
 
   public static getDerivedStateFromError(error: Error): State {
     return { hasError: true, error };
diff --git a/releases/HTY1024-APP-SKR-0.7.0_20251126.zip b/releases/HTY1024-APP-SKR-0.7.0_20251126.zip
new file mode 100644
index 0000000..dfdd161
Binary files /dev/null and b/releases/HTY1024-APP-SKR-0.7.0_20251126.zip differ
diff --git a/services/geminiService.ts b/services/geminiService.ts
index 6028d3a..18bec9d 100644
--- a/services/geminiService.ts
+++ b/services/geminiService.ts
@@ -1,4 +1,5 @@
+
 import { GoogleGenAI, Modality, Type } from "@google/genai";
 import { PronunciationFeedback, Language, ReadingLesson, ReadingDifficulty, OCRAnalysis, ListeningLesson } from "../types";
 import { base64ToUint8Array, uint8ArrayToBase64 } from "../utils/audioUtils";
@@ -425,7 +426,7 @@ class GeminiService {
     const ai = this.getAi();
     const targetLangName = LANGUAGE_MAP[language];
     // Prompt asks for a conversation or monologue suitable for listening practice
-    const prompt = `Create a Japanese listening practice script on "${topic}", level ${difficulty}. It should be a conversation or monologue.
+    const prompt = `Create a Japanese listening practice script on "${topic}", level ${difficulty}.
 Output JSON with:
 - title
 - script (The full Japanese text of the conversation/monologue)
@@ -433,6 +434,7 @@ class GeminiService {
 - vocabulary (Key words with meanings in ${targetLangName})
 - questions (3 multiple choice comprehension questions in ${targetLangName})
 - Each question needs: question, options (array of 3 strings), correctIndex (0-2), explanation (in ${targetLangName}).
+- grammarPoints (explanations in ${targetLangName}).
 `;
 
     return this.retryOperation(async () => {
@@ -461,9 +463,10 @@ class GeminiService {
               },
               required: ["question", "options", "correctIndex", "explanation"]
             }
-          }
+          },
+          grammarPoints: { type: Type.ARRAY, items: { type: Type.OBJECT, properties: { point: { type: Type.STRING }, explanation: { type: Type.STRING } } } }
         },
-        required: ["title", "script", "translation", "vocabulary", "questions"]
+        required: ["title", "script", "translation", "vocabulary", "questions", "grammarPoints"]
       }
     }
   });
@@ -558,4 +561,4 @@ class GeminiService {
   }
 }
 
-export const geminiService = new GeminiService();
+export const geminiService = new GeminiService();
\ No newline at end of file
diff --git a/types.ts b/types.ts
index 80963f2..65a9cef 100644
--- a/types.ts
+++ b/types.ts
@@ -123,6 +123,7 @@ export interface ListeningLesson {
   translation: string;
   vocabulary: { word: string; reading: string; meaning: string }[];
   questions: QuizQuestion[];
+  grammarPoints?: { point: string; explanation: string }[];
 }
 
 export interface ListeningLessonRecord extends ListeningLesson {
diff --git a/utils/localization.ts b/utils/localization.ts
index bc12ef4..0db96fc 100644
--- a/utils/localization.ts
+++ b/utils/localization.ts
@@ -107,7 +107,7 @@ export const getScenarios = (language: Language): Scenario[] => {
       title: '登机手续',
       icon: '✈️',
       description: '练习登机口的对话。',
-      initialMessage: 'ご搭乗ありがとうございます。パスポートと搭乗券を拝見します。',
+      initialMessage: 'ご搭乗ありがとうございます。パスポートと搭乗券を拝見します。',
       initialTranslation: '感谢您的搭乘。请出示护照和登机牌。',
       role: '地勤人员'
     }
@@ -341,7 +341,9 @@ export const translations = {
       emptyHistory: "No practice logs",
       qaWelcome: "I've generated a listening exercise. Listen to the audio first, try the quiz, then ask me anything!",
       noScript: "No script available to play.",
-      scriptMissing: "No script generated. Please try generating again."
+      scriptMissing: "No script generated. Please try generating again.",
+      vocabTitle: "Vocabulary",
+      grammarHeader: "Grammar"
     },
     ocr: {
       title: "Text Scanner 🔍",
@@ -593,7 +595,9 @@ export const translations = {
       emptyHistory: "練習ログなし",
       qaWelcome: "リスニング練習を作成しました。まず音声を聞いてクイズに挑戦し、その後何でも質問してください!",
       noScript: "再生できるスクリプトがありません。",
-      scriptMissing: "スクリプトが生成されませんでした。もう一度試してください。"
+      scriptMissing: "スクリプトが生成されませんでした。もう一度試してください。",
+      vocabTitle: "語彙",
+      grammarHeader: "文法"
     },
     ocr: {
       title: "テキストスキャナー 🔍",
@@ -845,7 +849,9 @@ export const translations = {
       emptyHistory: "暂无练习记录",
       qaWelcome: "我已生成听力练习。先听音频,尝试测验,然后尽管问我任何问题!",
       noScript: "暂无脚本可播放。",
-      scriptMissing: "未生成脚本。请重试。"
+      scriptMissing: "未生成脚本。请重试。",
+      vocabTitle: "词汇",
+      grammarHeader: "语法"
     },
     ocr: {
       title: "文本扫描仪 🔍",
diff --git a/views/ListeningView.tsx b/views/ListeningView.tsx
index 9ceab7d..d790308 100644
--- a/views/ListeningView.tsx
+++ b/views/ListeningView.tsx
@@ -1,8 +1,9 @@
+
 import React, { useState, useRef, useEffect } from 'react';
 import { Language, ListeningLesson, ListeningLessonRecord, ReadingDifficulty, ChatMessage, Role, MessageType } from '../types';
 import { geminiService, decodeAudioData } from '../services/geminiService';
 import { processAndDownloadAudio } from '../utils/audioUtils';
-import { Headphones, Loader2, Send, Eye, EyeOff, List, HelpCircle, ChevronLeft, History, Trash2, X, PanelRightClose, PanelRightOpen, Volume2, Square, Play, Pause, CheckCircle, AlertCircle, FileText, MessageCircle, Download, RotateCcw, Copy, Check } from 'lucide-react';
+import { Headphones, Loader2, Send, Eye, EyeOff, List, HelpCircle, ChevronLeft, History, Trash2, X, PanelRightClose, PanelRightOpen, Volume2, Square, Play, Pause, CheckCircle, AlertCircle, FileText, MessageCircle, Download, RotateCcw, Copy, Check, PenTool, Sparkles } from 'lucide-react';
 import { translations } from '../utils/localization';
 import ChatBubble from '../components/ChatBubble';
@@ -61,6 +62,8 @@ const ListeningView: React.FC = ({ language, history, onSave
   const [isTTSLoading, setIsTTSLoading] = useState(false);
   const [isPlaying, setIsPlaying] = useState(false);
   const [audioCache, setAudioCache] = useState(null);
+  const [playingVocabWord, setPlayingVocabWord] = useState(null);
+
   const audioContextRef = useRef(null);
   const audioSourceRef = useRef(null);
@@ -70,10 +73,32 @@ const ListeningView: React.FC = ({ language, history, onSave
   const [isChatLoading, setIsChatLoading] = useState(false);
   const chatEndRef = useRef(null);
 
+  // Selection State
+  const [selectedText, setSelectedText] = useState(null);
+  const scriptRef = useRef(null);
+
   // Cleanup audio when leaving lesson
   useEffect(() => {
     return () => stopAudio();
   }, [lesson]);
+
+  // Handle Selection
+  useEffect(() => {
+    const handleSelectionChange = () => {
+      const selection = window.getSelection();
+      if (selection && !selection.isCollapsed && scriptRef.current && scriptRef.current.contains(selection.anchorNode)) {
+        const text = selection.toString().trim();
+        if (text.length > 0) {
+          setSelectedText(text);
+          return;
+        }
+      }
+      setSelectedText(null);
+    };
+
+    document.addEventListener('selectionchange', handleSelectionChange);
+    return () => document.removeEventListener('selectionchange', handleSelectionChange);
+  }, [lesson, showScript]);
 
   const stopAudio = () => {
     if (audioSourceRef.current) {
@@ -81,9 +106,10 @@ const ListeningView: React.FC = ({ language, history, onSave
       audioSourceRef.current = null;
     }
     setIsPlaying(false);
+    setPlayingVocabWord(null);
   };
-  const playAudioData = async (base64Data: string) => {
+  const playAudioData = async (base64Data: string, onEnded?: () => void) => {
     stopAudio();
     if (!audioContextRef.current) {
       audioContextRef.current = new (window.AudioContext || (window as any).webkitAudioContext)();
     }
@@ -95,7 +121,7 @@ const ListeningView: React.FC = ({ language, history, onSave
     const source = ctx.createBufferSource();
     source.buffer = buffer;
     source.connect(ctx.destination);
-    source.onended = () => setIsPlaying(false);
+    source.onended = onEnded || (() => setIsPlaying(false));
     source.start();
     audioSourceRef.current = source;
   };
@@ -108,7 +134,7 @@ const ListeningView: React.FC = ({ language, history, onSave
 
     if (audioCache) {
       setIsPlaying(true);
-      await playAudioData(audioCache);
+      await playAudioData(audioCache, () => setIsPlaying(false));
       return;
     }
 
@@ -124,7 +150,7 @@ const ListeningView: React.FC = ({ language, history, onSave
       setAudioCache(audioBase64);
 
       setIsPlaying(true);
-      await playAudioData(audioBase64);
+      await playAudioData(audioBase64, () => setIsPlaying(false));
     } catch (e) {
       console.error("TTS Playback failed", e);
       setIsPlaying(false);
@@ -157,6 +183,25 @@ const ListeningView: React.FC = ({ language, history, onSave
     }
   };
 
+  const playVocab = async (word: string) => {
+    if (playingVocabWord === word) {
+      stopAudio();
+      return;
+    }
+
+    setPlayingVocabWord(word);
+    try {
+      const audioBase64 = await geminiService.generateSpeech(word);
+      if (audioBase64) {
+        await playAudioData(audioBase64, () => setPlayingVocabWord(null));
+      } else {
+        setPlayingVocabWord(null);
+      }
+    } catch (e) {
+      setPlayingVocabWord(null);
+    }
+  };
+
   const generateLesson = async () => {
     if (!topic.trim()) return;
     setIsGenerating(true);
@@ -239,11 +284,21 @@ const ListeningView: React.FC = ({ language, history, onSave
     }
   };
 
-  const handleAskTutor = async () => {
-    if (!chatInput.trim() || !lesson) return;
+  const handleAskTutor = async (customQuestion?: string) => {
+    const question = customQuestion || chatInput;
+    if (!question.trim() || !lesson) return;
 
-    const question = chatInput;
-    setChatInput('');
+    setMobileTab('tutor');
+
+    if (!customQuestion) {
+      setChatInput('');
+    } else {
+      if (window.getSelection) {
+        window.getSelection()?.removeAllRanges();
+      }
+      setSelectedText(null);
+    }
+
     setIsChatLoading(true);
 
     // Add User Message
@@ -433,9 +488,9 @@ const ListeningView: React.FC = ({ language, history, onSave
         {lesson && (
{/* Left: Content */} -
+
-
+
@@ -475,7 +530,7 @@ const ListeningView: React.FC = ({ language, history, onSave
-
+
{/* Audio Player Section - Modern Card Design */}
@@ -622,7 +677,7 @@ const ListeningView: React.FC = ({ language, history, onSave {/* Script Reveal */} {showScript && ( -
+

{t.scriptTitle}

@@ -642,22 +697,68 @@ const ListeningView: React.FC = ({ language, history, onSave
{/* Vocabulary List */} -
+

- {translations[language].reading.vocabTitle} + {t.vocabTitle}

{lesson.vocabulary?.map((v, i) => ( -
-
- {v.word} - ({v.reading}) +
+
+
+ {v.word} + {v.reading && ({v.reading})} +
+

{v.meaning}

))}
+ + {/* Grammar Section */} + {lesson.grammarPoints && lesson.grammarPoints.length > 0 && ( +
+

+ {t.grammarHeader} +

+
+ {lesson.grammarPoints.map((g, i) => ( +
+
{g.point}
+

{g.explanation}

+
+ ))} +
+
+ )} + + {/* Floating Ask Button */} + {selectedText && ( +
+ +
+ )}
)}
@@ -694,7 +795,7 @@ const ListeningView: React.FC = ({ language, history, onSave onKeyDown={(e) => e.key === 'Enter' && handleAskTutor()} />
- {/* Sidebar History (Desktop) */} + {/* Sidebar History (Desktop - Collapsible) */} {/* Content Scroll Area */} -
+
{/* 1. Image & Extracted Text */}
@@ -421,7 +454,7 @@ const OCRView: React.FC = ({ language, history, onSaveToHistory, o
{analysis?.vocabulary?.map((v, i) => ( v ? ( -
+
{v.word || ''} @@ -448,7 +481,7 @@ const OCRView: React.FC = ({ language, history, onSaveToHistory, o
{analysis.grammarPoints.map((g, i) => ( g ? ( -
+
{g.point || ''}

{g.explanation || ''}

@@ -457,6 +490,19 @@ const OCRView: React.FC = ({ language, history, onSaveToHistory, o
)} + + {/* Floating Ask Button */} + {selectedText && ( +
+ +
+ )}
@@ -491,7 +537,7 @@ const OCRView: React.FC = ({ language, history, onSaveToHistory, o onKeyDown={(e) => e.key === 'Enter' && handleAskTutor()} />
-
-
+
+

{lesson.japaneseContent || {t.contentMissing}} @@ -566,7 +600,11 @@ const ReadingView: React.FC = ({ language, history, onSaveToHi

{lesson.vocabulary?.map((v, i) => ( -
+
{v.word} @@ -593,7 +631,11 @@ const ReadingView: React.FC = ({ language, history, onSaveToHi
{lesson.grammarPoints.map((g, i) => ( -
+
{g.point}

{g.explanation}

@@ -602,6 +644,19 @@ const ReadingView: React.FC = ({ language, history, onSaveToHi
)}
+ + {/* Floating Ask Button */} + {selectedText && ( +
+ +
+ )}
@@ -639,7 +694,7 @@ const ReadingView: React.FC = ({ language, history, onSaveToHi onKeyDown={(e) => e.key === 'Enter' && handleAskTutor()} />