Refactor LlmController and GeminiProvider for improved message handling and redundant tool call detection

- Enhanced LlmController to detect and handle redundant tool calls, preventing unnecessary duplicate requests.
- Updated message formatting in GeminiProvider to align with Gemini's expectations, improving the structure of requests sent to the API.
- Improved logging in the AiChat component for better insight into received responses and the fallback paths used for empty content.
- Adjusted handling of final responses in AiChat so that meaningful content is always displayed to the user.
commit a0859b6a0d
parent 3fd9463682
2026-01-07 00:54:23 +07:00
4 changed files with 1224 additions and 155 deletions

File diff suppressed because it is too large
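
The suppressed diff presumably holds the LlmController changes. As a rough illustration of the redundant tool call detection described in the commit message, here is a minimal, hypothetical sketch; ToolCallDeduplicator and every name in it are invented for illustration, not the actual implementation:

// Hypothetical sketch of redundant tool-call detection; not the actual
// LlmController code (that diff is suppressed above). Idea: skip any tool
// call whose name and arguments match one already executed this turn.
using System.Collections.Generic;

public record ToolCall(string Name, string ArgumentsJson);

public class ToolCallDeduplicator
{
    // Signatures of tool calls already executed in the current turn.
    private readonly HashSet<string> _seen = new();

    // Returns true when an identical call (same tool, same arguments) was
    // already made, so the caller can skip re-executing it.
    public bool IsRedundant(ToolCall call)
    {
        var signature = $"{call.Name}:{call.ArgumentsJson}";
        return !_seen.Add(signature); // HashSet.Add returns false on duplicates
    }
}

In such a scheme the controller would consult IsRedundant before dispatching each requested call and, on a hit, reuse the earlier result instead of invoking the tool again.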


@@ -166,22 +166,13 @@ public class GeminiProvider : ILlmProvider
     private object ConvertToGeminiRequest(LlmChatRequest request)
     {
-        var contents = request.Messages
-            .Where(m => m.Role != "system") // Gemini doesn't support system messages in the same way
-            .Select(m => new
-            {
-                role = m.Role == "assistant" ? "model" : "user",
-                parts = new[]
-                {
-                    new { text = m.Content }
-                }
-            }).ToList();
-
-        // Add system message as first user message if present
+        var contents = new List<object>();
+
+        // Add system message as first user message if present (Gemini only uses first system message)
         var systemMessage = request.Messages.FirstOrDefault(m => m.Role == "system");
         if (systemMessage != null && !string.IsNullOrWhiteSpace(systemMessage.Content))
         {
-            contents.Insert(0, new
+            contents.Add(new
             {
                 role = "user",
                 parts = new[]
@@ -191,6 +182,70 @@ public class GeminiProvider : ILlmProvider
             });
         }
+
+        // Process non-system messages in order
+        // Gemini expects: user -> model (assistant) -> user (tool results) -> model -> ...
+        foreach (var message in request.Messages.Where(m => m.Role != "system"))
+        {
+            if (message.Role == "assistant")
+            {
+                // Assistant message - check if it has tool calls
+                if (message.ToolCalls != null && message.ToolCalls.Any())
+                {
+                    // This is a function call request - Gemini handles this automatically
+                    // We still need to add the text content if any
+                    if (!string.IsNullOrWhiteSpace(message.Content))
+                    {
+                        contents.Add(new
+                        {
+                            role = "model",
+                            parts = new[]
+                            {
+                                new { text = message.Content }
+                            }
+                        });
+                    }
+                }
+                else
+                {
+                    // Regular assistant response
+                    contents.Add(new
+                    {
+                        role = "model",
+                        parts = new[]
+                        {
+                            new { text = message.Content ?? "" }
+                        }
+                    });
+                }
+            }
+            else if (message.Role == "tool")
+            {
+                // Tool results - Gemini expects these as functionResponse parts in the model's response
+                // But since we're sending them as separate messages, we'll format them as user messages
+                // with clear indication they're tool results
+                contents.Add(new
+                {
+                    role = "user",
+                    parts = new[]
+                    {
+                        new { text = $"Tool result (call_id: {message.ToolCallId}): {message.Content}" }
+                    }
+                });
+            }
+            else
+            {
+                // User message
+                contents.Add(new
+                {
+                    role = "user",
+                    parts = new[]
+                    {
+                        new { text = message.Content ?? "" }
+                    }
+                });
+            }
+        }
 
         var geminiRequest = new
         {
             contents,
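
To make the resulting ordering concrete, here is a self-contained sketch that mirrors the conversion rules above; Msg is a simplified, hypothetical stand-in for the repo's real message type, and the assistant-with-ToolCalls branch is omitted for brevity:

// Self-contained sketch mirroring ConvertToGeminiRequest's ordering rules.
using System;
using System.Collections.Generic;
using System.Linq;

record Msg(string Role, string? Content, string? ToolCallId = null);

class GeminiOrderingDemo
{
    static void Main()
    {
        var messages = new List<Msg>
        {
            new("system", "You are a helpful assistant."),
            new("user", "What's the current CPU usage?"),
            new("assistant", "Let me check."),
            new("tool", "{\"cpu\": 42}", "call_1"),
        };

        var contents = new List<(string Role, string Text)>();

        // System message becomes a leading user turn.
        var system = messages.FirstOrDefault(m => m.Role == "system");
        if (system != null && !string.IsNullOrWhiteSpace(system.Content))
            contents.Add(("user", system.Content!));

        // Remaining messages keep their order; roles are mapped for Gemini.
        foreach (var m in messages.Where(m => m.Role != "system"))
        {
            contents.Add(m.Role switch
            {
                "assistant" => ("model", m.Content ?? ""),
                "tool" => ("user", $"Tool result (call_id: {m.ToolCallId}): {m.Content}"),
                _ => ("user", m.Content ?? ""),
            });
        }

        foreach (var (role, text) in contents)
            Console.WriteLine($"{role}: {text}");
        // Prints: user, user, model, user (tool result), the same order
        // the converted request would carry to Gemini.
    }
}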


@@ -159,22 +159,46 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
         if (update.type === 'final_response' && update.response) {
           finalResponse = update.response
+          console.log('Received final response from LLM:', {
+            hasContent: !!finalResponse.content,
+            contentLength: finalResponse.content?.length || 0,
+            contentPreview: finalResponse.content?.substring(0, 100) || '(empty)',
+            fullContent: finalResponse.content,
+            requiresToolExecution: finalResponse.requiresToolExecution
+          })
         }
       }

       // Add final response if we got one
       if (finalResponse) {
+        // Backend should always send meaningful content, but handle edge cases
+        const rawContent = finalResponse.content?.trim() || ''
+        const isContentValid = rawContent.length > 0
+
         const assistantMessage: Message = {
           role: 'assistant',
-          content: finalResponse.content || 'No response from AI',
+          content: isContentValid
+            ? rawContent
+            : 'I received your request but the response was empty. Please try rephrasing your question or ask for specific details.',
           timestamp: new Date()
         }
+        console.log('Adding final assistant message to chat:', {
+          rawContentLength: finalResponse.content?.length || 0,
+          trimmedContentLength: rawContent.length,
+          isContentValid,
+          contentPreview: assistantMessage.content.substring(0, 100),
+          fullContent: assistantMessage.content
+        })
         setMessages(prev => [...prev, assistantMessage])
       } else if (lastUpdate && lastUpdate.type === 'final_response' && lastUpdate.response) {
         // Fallback: check lastUpdate in case finalResponse wasn't set
+        console.log('Using fallback: final response from lastUpdate')
+        const rawContent = lastUpdate.response.content?.trim() || ''
         const assistantMessage: Message = {
           role: 'assistant',
-          content: lastUpdate.response.content || 'No response from AI',
+          content: rawContent.length > 0
+            ? rawContent
+            : 'I received your request but the response was empty. Please try rephrasing your question or ask for specific details.',
           timestamp: new Date()
         }
         setMessages(prev => [...prev, assistantMessage])
@@ -188,9 +212,14 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
         setMessages(prev => [...prev, errorMessage])
       } else {
         // If we didn't get a final response, show the last progress message
+        console.warn('No final response received. Last update:', {
+          type: lastUpdate?.type,
+          message: lastUpdate?.message,
+          hasResponse: !!lastUpdate?.response
+        })
         const assistantMessage: Message = {
           role: 'assistant',
-          content: lastUpdate?.message || 'Response incomplete',
+          content: lastUpdate?.message || 'Response incomplete. Please try again.',
           timestamp: new Date()
         }
         setMessages(prev => [...prev, assistantMessage])