Implement iterative tool calling in LlmController for enhanced response accuracy

- Added support for iterative tool calling in LlmController, allowing multiple rounds of tool execution until a final answer is reached.
- Updated system message to provide clearer guidance on proactive tool usage and response expectations.
- Enhanced logging to track iterations and tool execution results, improving debugging and user feedback.
- Ensured that a final response is always returned, even when the maximum iteration count is reached, so the caller never gets stuck on unresolved tool calls (see the sketch before the diff below).
commit 073111ddea
parent a227c72e1f
2026-01-04 23:33:55 +07:00
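
The core of the change is a bounded loop around what was previously a single chat, execute tools, chat-again sequence. The sketch below condenses the new control flow from the diff that follows; LlmMessage, LlmChatResponse, and _llmService.ChatAsync are names taken from the diff, while ExecuteToolCallAsync is a hypothetical stand-in for the controller's per-tool execution code.

// Sketch of the iterative flow (condensed from the diff below).
const int maxIterations = 3; // hard cap so a tool-happy model cannot loop forever
LlmChatResponse? finalResponse = null;

for (var iteration = 1; iteration <= maxIterations; iteration++)
{
    var response = await _llmService.ChatAsync(user, request);

    // No tool calls requested: this is the final answer.
    if (!response.RequiresToolExecution || response.ToolCalls?.Any() != true)
    {
        finalResponse = response;
        break;
    }

    // Record the assistant turn that asked for the tools...
    request.Messages.Add(new LlmMessage { Role = "assistant", ToolCalls = response.ToolCalls });

    // ...then run each tool and append its result as a "tool" message,
    // so the next ChatAsync call sees what the tools returned.
    foreach (var toolCall in response.ToolCalls)
    {
        var result = await ExecuteToolCallAsync(toolCall); // hypothetical helper
        request.Messages.Add(new LlmMessage
        {
            Role = "tool",
            Content = System.Text.Json.JsonSerializer.Serialize(result),
            ToolCallId = toolCall.Id
        });
    }
}

// Cap reached with tools still pending: ask once more so the caller always gets an answer.
finalResponse ??= await _llmService.ChatAsync(user, request);
return Ok(finalResponse);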


@@ -32,6 +32,7 @@ public class LlmController : BaseController
/// <summary>
/// Sends a chat message to an LLM with automatic provider selection and MCP tool calling support.
/// Supports both auto mode (backend selects provider) and BYOK (user provides API key).
+/// Implements iterative tool calling to allow multiple rounds of tool execution until a final answer is reached.
/// </summary>
/// <param name="request">The chat request with messages and optional provider/API key</param>
/// <returns>The LLM response with tool calls if applicable</returns>
@@ -65,7 +66,7 @@ public class LlmController : BaseController
request.Messages.Remove(msg);
}
-// Add explicit system message at the beginning
+// Add explicit system message at the beginning with proactive tool usage guidance
var systemMessage = new LlmMessage
{
Role = "system",
@@ -75,18 +76,39 @@ public class LlmController : BaseController
"Tools are ONLY for specific system operations like backtesting, agent management, or retrieving real-time market data. " +
"For questions about financial concepts, mathematical formulas (like Black-Scholes), trading strategies, or any theoretical knowledge, " +
"you MUST provide a direct answer using your knowledge. Do NOT refuse to answer or claim you can only use tools. " +
"Only use tools when the user explicitly needs to perform a system operation (e.g., 'run a backtest', 'get market data', 'manage agents')."
"When users ask questions that can be answered using tools (e.g., 'What is the best backtest?', 'Show me my backtests', 'What are my indicators?'), " +
"you MUST proactively use the tools with reasonable defaults rather than asking the user for parameters. " +
"For example, if asked 'What is the best backtest?', use get_backtests_paginated with sortBy='Score' and sortOrder='desc' to find the best performing backtests. " +
"Only ask for clarification if the user's intent is genuinely unclear or if you need specific information that cannot be inferred. " +
"Continue iterating with tools as needed until you can provide a complete, helpful answer to the user's question."
};
request.Messages.Insert(0, systemMessage);
-// Send chat request to LLM
-var response = await _llmService.ChatAsync(user, request);
+// Iterative tool calling: keep looping until we get a final answer without tool calls
+const int maxIterations = 3; // Prevent infinite loops
+int iteration = 0;
+LlmChatResponse? finalResponse = null;
-// If LLM wants to call tools, execute them and get final response
-if (response.RequiresToolExecution && response.ToolCalls?.Any() == true)
+while (iteration < maxIterations)
{
_logger.LogInformation("LLM requested {Count} tool calls for user {UserId}",
response.ToolCalls.Count, user.Id);
iteration++;
_logger.LogInformation("LLM chat iteration {Iteration} for user {UserId}", iteration, user.Id);
// Send chat request to LLM
var response = await _llmService.ChatAsync(user, request);
// If LLM doesn't want to call tools, we have our final answer
if (!response.RequiresToolExecution || response.ToolCalls == null || !response.ToolCalls.Any())
{
finalResponse = response;
_logger.LogInformation("LLM provided final answer after {Iteration} iteration(s) for user {UserId}",
iteration, user.Id);
break;
}
// LLM wants to call tools - execute them
_logger.LogInformation("LLM requested {Count} tool calls in iteration {Iteration} for user {UserId}",
response.ToolCalls.Count, iteration, user.Id);
// Execute all tool calls
var toolResults = new List<LlmMessage>();
@@ -101,13 +123,13 @@ public class LlmController : BaseController
Content = System.Text.Json.JsonSerializer.Serialize(toolResult),
ToolCallId = toolCall.Id
});
_logger.LogInformation("Successfully executed tool {ToolName} for user {UserId}",
toolCall.Name, user.Id);
_logger.LogInformation("Successfully executed tool {ToolName} in iteration {Iteration} for user {UserId}",
toolCall.Name, iteration, user.Id);
}
catch (Exception ex)
{
_logger.LogError(ex, "Error executing tool {ToolName} for user {UserId}",
toolCall.Name, user.Id);
_logger.LogError(ex, "Error executing tool {ToolName} in iteration {Iteration} for user {UserId}",
toolCall.Name, iteration, user.Id);
toolResults.Add(new LlmMessage
{
Role = "tool",
@@ -117,7 +139,7 @@ public class LlmController : BaseController
}
}
-// Add assistant message with tool calls
+// Add assistant message with tool calls to conversation history
request.Messages.Add(new LlmMessage
{
Role = "assistant",
@@ -125,15 +147,22 @@ public class LlmController : BaseController
ToolCalls = response.ToolCalls
});
-// Add tool results
+// Add tool results to conversation history
request.Messages.AddRange(toolResults);
-// Get final response from LLM
-var finalResponse = await _llmService.ChatAsync(user, request);
-return Ok(finalResponse);
+// Continue loop to get LLM's response to the tool results
}
-return Ok(response);
+// If we hit max iterations, return the last response (even if it has tool calls)
+if (finalResponse == null)
+{
+_logger.LogWarning("Reached max iterations ({MaxIterations}) for user {UserId}. Returning last response.",
+maxIterations, user.Id);
+// Get one more response to return something meaningful
+finalResponse = await _llmService.ChatAsync(user, request);
+}
+return Ok(finalResponse);
}
catch (Exception ex)
{
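
Taken together, each iteration grows the conversation history that goes back into ChatAsync. After one round of tool use the history has roughly this shape (a sketch; roles and fields follow the LlmMessage usage in the diff):

// request.Messages after one tool round (sketch):
//   [0] system    - the proactive tool-usage guidance inserted above
//   [1] user      - e.g. "What is the best backtest?"
//   [2] assistant - ToolCalls: get_backtests_paginated(sortBy: "Score", sortOrder: "desc")
//   [3] tool      - serialized tool result, ToolCallId matching the call in [2]
// Iteration 2 sends all four messages back to the LLM, which either answers
// (the loop exits with finalResponse) or requests another round of tools,
// up to the maxIterations cap of 3.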