Update to version v0.10.0_20251228

2025-12-28 16:01:16 +08:00
parent a1f9e76a13
commit baba106935
4 changed files with 119 additions and 44 deletions


@@ -11,10 +11,13 @@ interface State {
}
export class ErrorBoundary extends Component<Props, State> {
  public state: State = {
    hasError: false,
    error: null
  };
  constructor(props: Props) {
    super(props);
    this.state = {
      hasError: false,
      error: null
    };
  }
  public static getDerivedStateFromError(error: Error): State {
    return { hasError: true, error };
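
Review note: this hunk swaps the class-field state initializer for an explicit constructor. The two forms set identical initial state, so this is presumably a build-compatibility choice rather than a behavioral one. A boundary like this is only useful together with render (and typically componentDidCatch), neither of which appears in the hunk; a minimal self-contained sketch, where the class name, logging, and fallback markup are assumptions rather than code from this commit:

import { Component, ErrorInfo, ReactNode } from 'react';

interface Props { children: ReactNode; }
interface State { hasError: boolean; error: Error | null; }

export class MinimalErrorBoundary extends Component<Props, State> {
  constructor(props: Props) {
    super(props);
    this.state = { hasError: false, error: null };
  }

  // Switch to the fallback UI on the next render after a child throws.
  public static getDerivedStateFromError(error: Error): State {
    return { hasError: true, error };
  }

  // Side-effect hook for logging; the fallback itself lives in render().
  public componentDidCatch(error: Error, errorInfo: ErrorInfo) {
    console.error('ErrorBoundary caught:', error, errorInfo);
  }

  public render() {
    if (this.state.hasError) {
      // Hypothetical fallback; the real component presumably renders something richer.
      return <div role="alert">{this.state.error?.message ?? 'Something went wrong.'}</div>;
    }
    return this.props.children;
  }
}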

Binary file not shown.


@@ -118,17 +118,14 @@ class GeminiService {
    }
  }
  // 1. Text Chat Response - Returns { text, model }
  async generateTextResponse(
  private _getChatConfig(
    prompt: string,
    imageBase64?: string,
    useThinking: boolean = false,
    language: Language = 'en',
    modelOverride?: string,
    aiSpeakingLanguage: 'ja' | 'native' = 'native'
  ): Promise<{ text: string, model: string }> {
    const ai = this.getAi();
  ) {
    // Ensure model name is clean
    let modelName = useThinking
      ? 'gemini-3-pro-preview'
@@ -175,6 +172,21 @@ class GeminiService {
      config.thinkingConfig = { thinkingBudget: 32768 };
    }
    return { modelName, parts, config };
  }
  // 1. Text Chat Response - Returns { text, model }
  async generateTextResponse(
    prompt: string,
    imageBase64?: string,
    useThinking: boolean = false,
    language: Language = 'en',
    modelOverride?: string,
    aiSpeakingLanguage: 'ja' | 'native' = 'native'
  ): Promise<{ text: string, model: string }> {
    const ai = this.getAi();
    const { modelName, parts, config } = this._getChatConfig(prompt, imageBase64, useThinking, language, modelOverride, aiSpeakingLanguage);
    return this.retryOperation(async () => {
      const response = await ai.models.generateContent({
        model: modelName,
@@ -188,6 +200,32 @@ class GeminiService {
    });
  }
  // 1b. Text Chat Streaming Response
  async *generateTextStream(
    prompt: string,
    imageBase64?: string,
    useThinking: boolean = false,
    language: Language = 'en',
    modelOverride?: string,
    aiSpeakingLanguage: 'ja' | 'native' = 'native'
  ): AsyncGenerator<{ text: string, model: string }> {
    const ai = this.getAi();
    const { modelName, parts, config } = this._getChatConfig(prompt, imageBase64, useThinking, language, modelOverride, aiSpeakingLanguage);
    // Initial stream connection with retry logic
    const stream = await this.retryOperation(async () => {
      return await ai.models.generateContentStream({
        model: modelName,
        contents: { parts },
        config: config
      });
    });
    for await (const chunk of stream) {
      yield { text: chunk.text || "", model: modelName };
    }
  }
  // Internal helper for single TTS chunk
  private async _generateSpeechChunk(text: string): Promise<string | null> {
    const ai = this.getAi();
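
Review note: generateTextResponse and the new generateTextStream now share _getChatConfig, and retryOperation guards only the initial generateContentStream call, so a failure mid-stream surfaces inside the consumer's for-await loop. A minimal consumption sketch, assuming the class is exported as a geminiService singleton as the component code suggests; the helper name streamToString is illustrative:

// Sketch of consuming the streaming API; error handling is the caller's job
// because retryOperation only covers the initial connection.
async function streamToString(prompt: string): Promise<string> {
  let fullText = '';
  try {
    for await (const chunk of geminiService.generateTextStream(prompt)) {
      fullText += chunk.text;               // partial text, may be empty
      console.log(`[${chunk.model}]`, chunk.text);
    }
  } catch (err) {
    // Mid-stream failures are not retried by the service; handle them here.
    console.error('stream aborted mid-flight:', err);
  }
  return fullText;
}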


@@ -98,13 +98,27 @@ const ChatView: React.FC<ChatViewProps> = ({
    // IMPORTANT: Calculate new history locally to avoid stale closure issues after await
    const messagesWithUser = [...messages, userMsg];
    // Update UI immediately with user message
    onUpdateSession(activeSessionId, messagesWithUser);
    try {
      // 2. Get Response
      const result = await geminiService.generateTextResponse(
      // 2. Prepare AI Message Placeholder
      const aiMsgId = (Date.now() + 1).toString();
      const initialAiMsg: ChatMessage = {
        id: aiMsgId,
        role: Role.MODEL,
        type: MessageType.TEXT,
        content: "", // Start empty
        timestamp: Date.now(),
        metadata: { isThinking: useThinking }
      };
      let currentMessages = [...messagesWithUser, initialAiMsg];
      onUpdateSession(activeSessionId, currentMessages);
      // 3. Start Stream
      let fullText = "";
      let modelUsed = "";
      const stream = geminiService.generateTextStream(
        currentText || "Describe this image",
        currentImage || undefined,
        useThinking,
@@ -113,25 +127,26 @@ const ChatView: React.FC<ChatViewProps> = ({
        aiSpeakingLanguage
      );
      // 3. TTS (if short and not thinking)
      let ttsAudio: string | null = null;
      if (!useThinking && result.text.length < 300) {
        try { ttsAudio = await geminiService.generateSpeech(result.text); } catch (e) {}
      for await (const chunk of stream) {
        fullText += chunk.text;
        modelUsed = chunk.model;
        // Update the last message (AI message) content
        const updatedAiMsg = { ...initialAiMsg, content: fullText, model: modelUsed };
        currentMessages = [...messagesWithUser, updatedAiMsg];
        onUpdateSession(activeSessionId, currentMessages);
      }
      const aiMsg: ChatMessage = {
        id: (Date.now() + 1).toString(),
        role: Role.MODEL,
        type: MessageType.TEXT,
        content: result.text,
        model: result.model,
        timestamp: Date.now(),
        metadata: { isThinking: useThinking, audioUrl: ttsAudio || undefined }
      };
      // 4. Add AI Message to the LOCALLY calculated history (messagesWithUser)
      // This ensures we don't lose the user message we just added
      onUpdateSession(activeSessionId, [...messagesWithUser, aiMsg]);
      // 4. TTS (if short and not thinking)
      if (!useThinking && fullText.length < 300) {
        try {
          const ttsAudio = await geminiService.generateSpeech(fullText);
          if (ttsAudio) {
            const finalAiMsg = { ...initialAiMsg, content: fullText, model: modelUsed, metadata: { ...initialAiMsg.metadata, audioUrl: ttsAudio } };
            onUpdateSession(activeSessionId, [...messagesWithUser, finalAiMsg]);
          }
        } catch (e) {}
      }
    } catch (error: any) {
      const errorMsg = error?.message || t.error;
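
Review note on the two hunks above: the handler now appends an empty MODEL placeholder, then rebuilds the session on every chunk from the locally captured messagesWithUser rather than from React state, which is exactly the stale-closure hazard the IMPORTANT comment warns about. The pattern in isolation, as a self-contained sketch; Msg is a simplified stand-in for ChatMessage and all names are illustrative:

// Minimal sketch of optimistic streaming updates into a chat history.
type Msg = { id: string; role: string; content: string; model?: string };
type StreamChunk = { text: string; model: string };

async function streamIntoHistory(
  history: Msg[],
  placeholder: Msg,
  stream: AsyncIterable<StreamChunk>,
  updateSession: (messages: Msg[]) => void
): Promise<string> {
  // Show the empty placeholder immediately (optimistic update).
  updateSession([...history, placeholder]);
  let fullText = '';
  for await (const chunk of stream) {
    fullText += chunk.text;
    // Always rebuild from the captured `history`, never from component state,
    // so a slow await cannot drop the just-added user message.
    updateSession([...history, { ...placeholder, content: fullText, model: chunk.model }]);
  }
  return fullText;
}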
@@ -168,22 +183,41 @@ const ChatView: React.FC<ChatViewProps> = ({
      const messagesWithUser = [...messages, userMsg];
      onUpdateSession(activeSessionId, messagesWithUser);
      // 3. Generate AI Response
      const result = await geminiService.generateTextResponse(transcription, undefined, false, language, selectedModel, aiSpeakingLanguage);
      const ttsAudio = await geminiService.generateSpeech(result.text);
      const aiMsg: ChatMessage = {
        id: (Date.now() + 1).toString(),
      // 3. Prepare AI Message Placeholder
      const aiMsgId = (Date.now() + 1).toString();
      const initialAiMsg: ChatMessage = {
        id: aiMsgId,
        role: Role.MODEL,
        type: MessageType.TEXT,
        content: result.text,
        model: result.model,
        content: "",
        timestamp: Date.now(),
        metadata: { audioUrl: ttsAudio || undefined }
      };
      let currentMessages = [...messagesWithUser, initialAiMsg];
      onUpdateSession(activeSessionId, currentMessages);
      // 4. Update UI with AI Message using local history
      onUpdateSession(activeSessionId, [...messagesWithUser, aiMsg]);
      // 4. Stream Response
      let fullText = "";
      let modelUsed = "";
      const stream = geminiService.generateTextStream(transcription, undefined, false, language, selectedModel, aiSpeakingLanguage);
      for await (const chunk of stream) {
        fullText += chunk.text;
        modelUsed = chunk.model;
        const updatedAiMsg = { ...initialAiMsg, content: fullText, model: modelUsed };
        currentMessages = [...messagesWithUser, updatedAiMsg];
        onUpdateSession(activeSessionId, currentMessages);
      }
      // 5. Generate TTS
      try {
        const ttsAudio = await geminiService.generateSpeech(fullText);
        if (ttsAudio) {
          const finalAiMsg = { ...initialAiMsg, content: fullText, model: modelUsed, metadata: { audioUrl: ttsAudio } };
          onUpdateSession(activeSessionId, [...messagesWithUser, finalAiMsg]);
        }
      } catch (e) {}
    } catch (e) {
      console.error(e);
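
Review note: the voice path now mirrors the text path: stream the reply into a placeholder, then attach best-effort TTS as metadata once the full text is known. A compact sketch of that sequencing, assuming geminiService is importable as in ChatView; everything except the generateTextStream and generateSpeech calls is illustrative:

// Hypothetical glue for one voice turn; session plumbing is elided.
async function runVoiceTurn(transcription: string) {
  let content = '';
  let model = '';
  for await (const chunk of geminiService.generateTextStream(transcription)) {
    content += chunk.text;
    model = chunk.model;   // the service reports which model actually answered
  }
  let audioUrl: string | undefined;
  try {
    audioUrl = (await geminiService.generateSpeech(content)) ?? undefined;
  } catch {
    // TTS stays best-effort, mirroring the empty catch blocks above.
  }
  return { content, model, audioUrl };
}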
@@ -438,7 +472,7 @@ const ChatView: React.FC<ChatViewProps> = ({
            onError={(errorMsg) => addToast('error', errorMsg)}
          />
        ))}
        {isLoading && (
        {isLoading && !messages[messages.length - 1]?.content && (
          <div className="flex items-center gap-2 text-slate-400 text-sm ml-4 animate-pulse">
            <div className="w-8 h-8 bg-white rounded-full flex items-center justify-center shadow-sm border border-slate-100">
              <Loader2 size={14} className="animate-spin text-indigo-500" />
@@ -496,7 +530,7 @@ const ChatView: React.FC<ChatViewProps> = ({
          disabled={!inputValue.trim() && !attachedImage || isLoading}
          className="sm:hidden p-3 bg-indigo-600 hover:bg-indigo-700 text-white rounded-xl disabled:opacity-50 disabled:scale-95 transition-all shadow-md shadow-indigo-200"
        >
          {isLoading ? <Loader2 size={20} className="animate-spin" /> : <Send size={20} />}
          {isLoading && messages[messages.length - 1]?.content ? <Send size={20} /> : isLoading ? <Loader2 size={20} className="animate-spin" /> : <Send size={20} />}
        </button>
      </div>
@@ -516,7 +550,7 @@ const ChatView: React.FC<ChatViewProps> = ({
          disabled={!inputValue.trim() && !attachedImage || isLoading}
          className="hidden sm:flex p-3.5 bg-indigo-600 hover:bg-indigo-700 text-white rounded-xl disabled:opacity-50 transition-all shadow-lg shadow-indigo-200 hover:scale-105 active:scale-95"
        >
          {isLoading ? <Loader2 size={20} className="animate-spin" /> : <Send size={20} />}
          {isLoading && messages[messages.length - 1]?.content ? <Send size={20} /> : isLoading ? <Loader2 size={20} className="animate-spin" /> : <Send size={20} />}
        </button>
      </div>
    </div>
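
Review note on the last three hunks: the typing indicator and both send buttons now branch on the same question, namely whether a response is loading with nothing streamed yet. The indicator hides once the first chunk lands, and the buttons show a spinner only before that first chunk. Hoisting the condition once per render would keep the three call sites in sync; a minimal sketch using only the existing messages and isLoading values:

// Sketch: derive the streaming-UI state once per render (names illustrative).
const lastMessage = messages[messages.length - 1];
const awaitingFirstChunk = isLoading && !lastMessage?.content;

// Typing indicator:  {awaitingFirstChunk && ( /* pulse row */ )}
// Send buttons:      {awaitingFirstChunk ? <Loader2 size={20} className="animate-spin" /> : <Send size={20} />}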