managing-apps/src/Managing.Api/Controllers/LlmController.cs
cryptooda df27bbdfa1 Add system message to LLM requests and improve indicator type resolution
- Introduced a system message in LlmController to clarify that tools are optional for LLM responses, enhancing user guidance.
- Refactored indicator type resolution in IndicatorTools to support fuzzy matching and to suggest close alternatives for invalid types, improving user experience and error handling (sketched below).
- Updated methods to utilize the new resolution logic, ensuring consistent handling of indicator types across the application.
2026-01-04 02:00:51 +07:00
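
IndicatorTools itself is not part of this file, but as a rough illustration of the resolution logic the commit message describes, the fuzzy matching could look something like the sketch below. Everything in it is an assumption (the enum values, the normalization, the use of edit distance), not the actual implementation.

using System;
using System.Collections.Generic;
using System.Linq;

// Hypothetical indicator set; the real IndicatorType lives elsewhere in the codebase.
public enum IndicatorType { Sma, Ema, Rsi, Macd, BollingerBands }

public static class IndicatorTypeResolver
{
    // Resolve user input to an IndicatorType: exact match first (case/format-insensitive),
    // then fall back to edit distance to suggest the closest known types.
    public static (IndicatorType? Match, IReadOnlyList<string> Suggestions) Resolve(string input)
    {
        var normalized = new string(input.Where(char.IsLetterOrDigit).ToArray()).ToLowerInvariant();

        foreach (var value in Enum.GetValues<IndicatorType>())
        {
            if (value.ToString().ToLowerInvariant() == normalized)
                return (value, Array.Empty<string>());
        }

        // No exact match: rank all known names by Levenshtein distance and suggest the closest.
        var suggestions = Enum.GetNames<IndicatorType>()
            .OrderBy(name => Distance(normalized, name.ToLowerInvariant()))
            .Take(3)
            .ToList();
        return (null, suggestions);
    }

    // Classic dynamic-programming Levenshtein edit distance.
    private static int Distance(string a, string b)
    {
        var d = new int[a.Length + 1, b.Length + 1];
        for (var i = 0; i <= a.Length; i++) d[i, 0] = i;
        for (var j = 0; j <= b.Length; j++) d[0, j] = j;
        for (var i = 1; i <= a.Length; i++)
            for (var j = 1; j <= b.Length; j++)
                d[i, j] = Math.Min(
                    Math.Min(d[i - 1, j] + 1, d[i, j - 1] + 1),
                    d[i - 1, j - 1] + (a[i - 1] == b[j - 1] ? 0 : 1));
        return d[a.Length, b.Length];
    }
}

With this shape, input like "bollinger_bands" resolves exactly, while a typo such as "rso" returns no match plus suggestions led by Rsi.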

using System.Text.Json;
using Managing.Application.Abstractions.Services;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Mvc;

namespace Managing.Api.Controllers;

/// <summary>
/// Controller for LLM (Large Language Model) operations with MCP tool calling support.
/// Provides endpoints for chat interactions with automatic provider selection and BYOK (Bring Your Own Key) support.
/// </summary>
[ApiController]
[Authorize]
[Route("[controller]")]
[Produces("application/json")]
public class LlmController : BaseController
{
    private readonly ILlmService _llmService;
    private readonly IMcpService _mcpService;
    private readonly ILogger<LlmController> _logger;

    public LlmController(
        ILlmService llmService,
        IMcpService mcpService,
        IUserService userService,
        ILogger<LlmController> logger) : base(userService)
    {
        _llmService = llmService;
        _mcpService = mcpService;
        _logger = logger;
    }

    /// <summary>
    /// Sends a chat message to an LLM with automatic provider selection and MCP tool calling support.
    /// Supports both auto mode (backend selects provider) and BYOK (user provides API key).
    /// </summary>
    /// <param name="request">The chat request with messages and optional provider/API key</param>
    /// <returns>The LLM response with tool calls if applicable</returns>
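    /// <remarks>
    /// A typical request body might look like the following (field names inferred from
    /// LlmChatRequest; the provider/apiKey values are illustrative, not the exact contract):
    /// <code>
    /// {
    ///   "messages": [
    ///     { "role": "user", "content": "Backtest a 20/50 SMA crossover on BTCUSDT." }
    ///   ],
    ///   "provider": "openai",
    ///   "apiKey": "sk-..."
    /// }
    /// </code>
    /// </remarks>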
    [HttpPost]
    [Route("Chat")]
    public async Task<ActionResult<LlmChatResponse>> Chat([FromBody] LlmChatRequest request)
    {
        if (request == null)
        {
            return BadRequest("Chat request is required");
        }

        if (request.Messages == null || !request.Messages.Any())
        {
            return BadRequest("At least one message is required");
        }

        try
        {
            var user = await GetUser();

            // Attach the available MCP tools to the request
            var availableTools = await _mcpService.GetAvailableToolsAsync();
            request.Tools = availableTools.ToList();

            // If the caller did not supply a system message, add one clarifying that
            // tools are optional and the LLM can respond directly
            var hasSystemMessage = request.Messages.Any(m => m.Role == "system");
            if (!hasSystemMessage)
            {
                var systemMessage = new LlmMessage
                {
                    Role = "system",
                    Content = "You are a helpful AI assistant with expertise in quantitative finance, algorithmic trading, and financial mathematics. " +
                              "You can answer questions directly using your knowledge. " +
                              "Tools are available for specific operations (backtesting, agent management, market data retrieval, etc.) but are optional. " +
                              "Use tools only when they are needed for the specific task. " +
                              "For general questions, explanations, calculations, or discussions, respond directly without using tools."
                };
                request.Messages.Insert(0, systemMessage);
            }

            // Send the chat request to the LLM
            var response = await _llmService.ChatAsync(user, request);

            // If the LLM wants to call tools, execute them and get the final response
            if (response.RequiresToolExecution && response.ToolCalls?.Any() == true)
            {
                _logger.LogInformation("LLM requested {Count} tool calls for user {UserId}",
                    response.ToolCalls.Count, user.Id);

                // Execute all tool calls
                var toolResults = new List<LlmMessage>();
                foreach (var toolCall in response.ToolCalls)
                {
                    try
                    {
                        var toolResult = await _mcpService.ExecuteToolAsync(user, toolCall.Name, toolCall.Arguments);
                        toolResults.Add(new LlmMessage
                        {
                            Role = "tool",
                            Content = JsonSerializer.Serialize(toolResult),
                            ToolCallId = toolCall.Id
                        });

                        _logger.LogInformation("Successfully executed tool {ToolName} for user {UserId}",
                            toolCall.Name, user.Id);
                    }
                    catch (Exception ex)
                    {
                        _logger.LogError(ex, "Error executing tool {ToolName} for user {UserId}",
                            toolCall.Name, user.Id);

                        // Surface the failure to the LLM as a tool result so it can recover
                        toolResults.Add(new LlmMessage
                        {
                            Role = "tool",
                            Content = $"Error executing tool: {ex.Message}",
                            ToolCallId = toolCall.Id
                        });
                    }
                }

                // Add the assistant message with its tool calls, then the tool results
                request.Messages.Add(new LlmMessage
                {
                    Role = "assistant",
                    Content = response.Content,
                    ToolCalls = response.ToolCalls
                });
                request.Messages.AddRange(toolResults);

                // Get the final response from the LLM
                var finalResponse = await _llmService.ChatAsync(user, request);
                return Ok(finalResponse);
            }

            return Ok(response);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error processing chat request");
            return StatusCode(500, $"Error processing chat request: {ex.Message}");
        }
    }

    /// <summary>
    /// Gets the list of available LLM providers configured on the backend.
    /// </summary>
    /// <returns>List of provider names</returns>
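    /// <remarks>
    /// The response is a JSON array of provider identifiers, e.g. ["OpenAI", "Anthropic"]
    /// (example values only; the actual names depend on backend configuration).
    /// </remarks>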
    [HttpGet]
    [Route("Providers")]
    public async Task<ActionResult<IEnumerable<string>>> GetProviders()
    {
        try
        {
            var providers = await _llmService.GetAvailableProvidersAsync();
            return Ok(providers);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error getting available providers");
            return StatusCode(500, $"Error getting available providers: {ex.Message}");
        }
    }

    /// <summary>
    /// Gets the list of available MCP tools that the LLM can call.
    /// </summary>
    /// <returns>List of MCP tools with their descriptions and parameters</returns>
    [HttpGet]
    [Route("Tools")]
    public async Task<ActionResult<IEnumerable<McpToolDefinition>>> GetTools()
    {
        try
        {
            var tools = await _mcpService.GetAvailableToolsAsync();
            return Ok(tools);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error getting available tools");
            return StatusCode(500, $"Error getting available tools: {ex.Message}");
        }
    }
}
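
As a usage sketch, a client could call the Chat endpoint as follows. The base address and bearer token are placeholders; the route Llm/Chat follows from the [Route("[controller]")] and [Route("Chat")] attributes above, and the payload mirrors the request shape the Chat action validates.

using System.Net.Http.Headers;
using System.Net.Http.Json;

// Minimal client sketch for POST /Llm/Chat; address, token, and message are illustrative.
var client = new HttpClient { BaseAddress = new Uri("https://localhost:5001") };
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", "<jwt>");

var payload = new
{
    messages = new[]
    {
        new { role = "user", content = "Explain the Sharpe ratio." }
    }
};

var response = await client.PostAsJsonAsync("Llm/Chat", payload);
response.EnsureSuccessStatusCode();
Console.WriteLine(await response.Content.ReadAsStringAsync());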