import React, { useState, useEffect, useRef } from 'react';
import { geminiService, decodeAudioData } from '../services/geminiService';
import AudioRecorder from '../components/AudioRecorder';
import { processAndDownloadAudio } from '../utils/audioUtils';
import { Scenario, PronunciationFeedback, Language } from '../types';
import {
  Mic, Volume2, ChevronRight, Award, AlertCircle, CheckCircle, User, Bot,
  ArrowLeft, Download, ToggleLeft, ToggleRight, PanelRightClose, PanelRightOpen, X
} from 'lucide-react';
import { translations, getScenarios } from '../utils/localization';

interface SpeakingPracticeViewProps {
  language: Language;
}

const SpeakingPracticeView: React.FC<SpeakingPracticeViewProps> = ({ language }) => {
  const t = translations[language].speaking;
  const tRecorder = translations[language].recorder;

  const [activeScenario, setActiveScenario] = useState<Scenario | null>(null);
  const [history, setHistory] = useState<{ role: string; text: string; translation?: string }[]>([]);
  const [feedback, setFeedback] = useState<PronunciationFeedback | null>(null);
  const [isProcessing, setIsProcessing] = useState(false);
  const [isPlayingTTS, setIsPlayingTTS] = useState(false);
  const [lastAudioUrl, setLastAudioUrl] = useState<string | null>(null);
  const [showTranslation, setShowTranslation] = useState(false);
  const [isFeedbackOpen, setIsFeedbackOpen] = useState(false); // Mobile feedback drawer
  const audioContextRef = useRef<AudioContext | null>(null);

  // Reset the flow when the language changes, to avoid mismatched text.
  useEffect(() => {
    reset();
  }, [language]);

  const startScenario = (scenario: Scenario) => {
    setActiveScenario(scenario);
    setHistory([{ role: 'model', text: scenario.initialMessage, translation: scenario.initialTranslation }]);
    setFeedback(null);
    playTTS(scenario.initialMessage);
  };

  const playTTS = async (text: string) => {
    try {
      setIsPlayingTTS(true);
      const audioBase64 = await geminiService.generateSpeech(text);
      if (audioBase64) {
        setLastAudioUrl(audioBase64);
        // Lazily create a shared AudioContext (with the WebKit fallback).
        if (!audioContextRef.current) {
          audioContextRef.current = new (window.AudioContext || (window as any).webkitAudioContext)();
        }
        const buffer = await decodeAudioData(audioBase64, audioContextRef.current);
        const source = audioContextRef.current.createBufferSource();
        source.buffer = buffer;
        source.connect(audioContextRef.current.destination);
        source.onended = () => setIsPlayingTTS(false);
        source.start();
      } else {
        // No audio came back: clear the playing flag so the UI doesn't hang.
        setIsPlayingTTS(false);
      }
    } catch (e) {
      console.error(e);
      setIsPlayingTTS(false);
    }
  };

  const downloadTTS = async (text: string) => {
    try {
      // If the last generated audio matches the requested text, reuse the cached copy.
      if (lastAudioUrl && history[history.length - 1]?.text === text) {
        processAndDownloadAudio(lastAudioUrl, `sakura_roleplay_${Date.now()}.wav`);
        return;
      }
      // Otherwise generate fresh audio.
      const audioBase64 = await geminiService.generateSpeech(text);
      if (audioBase64) {
        processAndDownloadAudio(audioBase64, `sakura_roleplay_${Date.now()}.wav`);
      }
    } catch (e) {
      console.error('Download failed', e);
    }
  };

  const handleAudioInput = async (base64Audio: string) => {
    if (!activeScenario) return;
    setIsProcessing(true);
    // Keep only the last four turns as conversational context.
    const historyText = history.slice(-4).map(h => `${h.role}: ${h.text}`).join('\n');
    try {
      const result = await geminiService.analyzeSpeakingPerformance(
        base64Audio,
        `Roleplay as ${activeScenario.role} in context: ${activeScenario.description}`,
        historyText,
        language
      );
      if (result) {
        setFeedback(result);
        setHistory(prev => [
          ...prev,
          { role: 'user', text: result.transcription },
          { role: 'model', text: result.response, translation: result.translation }
        ]);
        setIsFeedbackOpen(true); // Open feedback automatically on new input
        await playTTS(result.response);
      }
    } catch (e) {
      console.error('Analysis failed', e);
    } finally {
      setIsProcessing(false);
    }
  };

  const reset = () => {
    setActiveScenario(null);
    setHistory([]);
    setFeedback(null);
    setLastAudioUrl(null);
    setShowTranslation(false);
    setIsFeedbackOpen(false);
  };

  // Initial View: Scenario Selection
  if (!activeScenario) {
    const scenarios = getScenarios(language);
    // NOTE: the original layout markup did not survive; containers and class
    // names from here on are illustrative reconstructions built around the
    // surviving text nodes, handlers, and class-name fragments.
    return (
      <div className="mx-auto max-w-3xl p-6">
        <h1 className="text-2xl font-bold">{t.title}</h1>
        <p className="mb-6 text-gray-500">{t.subtitle}</p>
        <div className="grid gap-4">
          {scenarios.map((scenario, index) => (
            <button
              key={index}
              onClick={() => startScenario(scenario)}
              className="flex items-center justify-between rounded-xl border p-4 text-left hover:shadow"
            >
              <div className="flex items-center gap-3">
                <span className="text-2xl">{scenario.icon}</span>
                <div>
                  <p className="font-semibold">{scenario.title}</p>
                  <p className="text-sm text-gray-500">{scenario.description}</p>
                </div>
              </div>
              <ChevronRight className="h-5 w-5 text-gray-400" />
            </button>
          ))}
        </div>
      </div>
    );
  }

  // Feedback Content Component
  const FeedbackContent = () => (
    <div className="flex h-full flex-col">
      <div className="flex items-center justify-between border-b p-4">
        <h2 className="flex items-center gap-2 font-bold">
          <Award className="h-5 w-5 text-amber-500" />
          {t.feedbackTitle}
        </h2>
        {/* Close button for the mobile drawer */}
        <button className="lg:hidden" onClick={() => setIsFeedbackOpen(false)} aria-label="Close feedback">
          <X className="h-5 w-5" />
        </button>
      </div>
      {feedback ? (
        <div className="space-y-6 overflow-y-auto p-4">
          {/* Score Card */}
          <div className="text-center">
            <Award className={`mx-auto h-10 w-10 ${(feedback.score || 0) > 80 ? 'text-green-500' : (feedback.score || 0) > 60 ? 'text-amber-500' : 'text-red-500'} transition-all duration-1000 ease-out`} />
            <p className="text-3xl font-bold">{feedback.score || 0}</p>
            <p className="text-sm text-gray-500">{t.score}</p>
          </div>
          {/* Issues List */}
          <div>
            <h3 className="flex items-center gap-2 font-semibold">
              <AlertCircle className="h-4 w-4 text-red-500" />
              {t.toImprove}
            </h3>
            {feedback.pronunciationIssues && feedback.pronunciationIssues.length > 0 ? (
              <ul className="mt-2 space-y-1 text-sm">
                {feedback.pronunciationIssues.map((issue, i) => (
                  <li key={i}>• {issue}</li>
                ))}
              </ul>
            ) : (
              <p className="mt-2 flex items-center gap-2 text-sm text-green-600">
                <CheckCircle className="h-4 w-4" />
                {t.perfect}
              </p>
            )}
          </div>
          {/* Advice */}
          <div>
            <h3 className="font-semibold">{t.advice}</h3>
            <p className="mt-2 text-sm">{feedback.advice}</p>
          </div>
          {/* Metadata */}
          <div className="space-y-2 text-sm">
            <div>
              <p className="font-semibold">{t.transcription}</p>
              <p className="italic">"{feedback.transcription}"</p>
            </div>
            <div>
              <p className="font-semibold">{t.meaning}</p>
              <p className="italic">"{feedback.translation}"</p>
            </div>
          </div>
        </div>
      ) : (
        <p className="p-4 text-sm text-gray-500">{t.emptyFeedback}</p>
      )}
    </div>
  );

  // Active Conversation View
  return (
    <div className="flex h-full">
      {/* Left: Conversation Area */}
      <div className="flex min-w-0 flex-1 flex-col">
        {/* Header */}
        <div className="flex items-center gap-3 border-b p-4">
          <button onClick={reset} aria-label="Back">
            <ArrowLeft className="h-5 w-5" />
          </button>
          <span className="text-2xl">{activeScenario.icon}</span>
          <div className="min-w-0 flex-1">
            <p className="truncate font-bold">{activeScenario.title}</p>
            <p className="truncate text-xs text-gray-500">{t.roleplay}: {activeScenario.role}</p>
          </div>
          {/* Translation toggle */}
          <button onClick={() => setShowTranslation(v => !v)} aria-label="Toggle translation">
            {showTranslation ? <ToggleRight className="h-6 w-6 text-green-500" /> : <ToggleLeft className="h-6 w-6 text-gray-400" />}
          </button>
          {/* Feedback Toggle (Mobile) */}
          <button className="lg:hidden" onClick={() => setIsFeedbackOpen(v => !v)} aria-label="Toggle feedback">
            {isFeedbackOpen ? <PanelRightClose className="h-5 w-5" /> : <PanelRightOpen className="h-5 w-5" />}
          </button>
        </div>

        {/* Messages */}
        <div className="flex-1 space-y-4 overflow-y-auto p-4">
          {history.map((msg, idx) => {
            const isUser = msg.role === 'user';
            return (
              <div key={idx} className={`flex gap-2 ${isUser ? 'justify-end' : 'justify-start'}`}>
                {isUser ? <User className="h-6 w-6" /> : <Bot className="h-6 w-6" />}
                <div className={`max-w-[75%] rounded-xl p-3 ${isUser ? 'bg-blue-500 text-white' : 'bg-gray-100'}`}>
                  <p>{msg.text}</p>
                  {showTranslation && msg.translation && (
                    <p className="mt-1 text-xs opacity-70">{msg.translation}</p>
                  )}
                  {!isUser && (
                    <div className="mt-2 flex gap-2">
                      <button onClick={() => playTTS(msg.text)} disabled={isPlayingTTS} aria-label="Play message audio">
                        <Volume2 className="h-4 w-4" />
                      </button>
                      <button onClick={() => downloadTTS(msg.text)} aria-label="Download message audio">
                        <Download className="h-4 w-4" />
                      </button>
                    </div>
                  )}
                </div>
              </div>
            );
          })}
          {isProcessing && (
            <p className="animate-pulse text-sm text-gray-400">{t.listening}</p>
          )}
        </div>

        {/* Interaction Area */}
        <div className="border-t p-4 text-center">
          {isProcessing && <div className="mx-auto mb-2 h-6 w-6 animate-spin rounded-full border-2 border-gray-300 border-t-transparent" />}
          {/* AudioRecorder's prop names are assumptions; adjust to its real API. */}
          <AudioRecorder onRecordingComplete={handleAudioInput} disabled={isProcessing} labels={tRecorder} />
          <p className="mt-2 flex items-center justify-center gap-2 text-sm text-gray-500">
            <Mic className="h-4 w-4" />
            {isProcessing ? t.processing : t.tapSpeak}
          </p>
        </div>
      </div>

      {/* Right: Feedback Panel (Desktop) */}
      <aside className="hidden w-80 border-l lg:block">
        <FeedbackContent />
      </aside>

      {/* Right: Feedback Drawer (Mobile) */}
      {isFeedbackOpen && (
        <div className="fixed inset-y-0 right-0 z-40 w-80 max-w-full bg-white shadow-xl lg:hidden">
          <FeedbackContent />
        </div>
      )}
    </div>
  );
};

export default SpeakingPracticeView;
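
/*
 * Usage sketch (illustrative, not part of this module): a hypothetical parent
 * that owns the current UI language and renders this view. `App`, the import
 * paths, and the 'en' literal are assumptions; the real app may store and
 * switch `language` differently.
 *
 *   import React, { useState } from 'react';
 *   import { Language } from './types';
 *   import SpeakingPracticeView from './views/SpeakingPracticeView';
 *
 *   const App: React.FC = () => {
 *     const [language] = useState<Language>('en');
 *     return <SpeakingPracticeView language={language} />;
 *   };
 */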