Implement LLM provider configuration and update user settings

- Added functionality to update the default LLM provider for users via a new endpoint in UserController.
- Introduced LlmProvider enum to manage available LLM options: Auto, Gemini, OpenAI, and Claude (see the enum sketch after this list).
- Updated User and UserEntity models to include a DefaultLlmProvider property (sketched below).
- Enhanced database context and migrations to support the new LLM provider configuration.
- Integrated LLM services into the application bootstrap for dependency injection (see the registration sketch below).
- Updated TypeScript API client to include methods for managing LLM providers and chat requests.
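
The LlmProvider enum itself is not part of the hunks shown below. Based on the values listed above and the using static Managing.Common.Enums; import added to UserController, it presumably amounts to something like the following sketch (not the committed definition):

namespace Managing.Common;

public static class Enums
{
    // Available LLM providers; Auto lets the backend pick a configured provider.
    public enum LlmProvider
    {
        Auto,
        Gemini,
        OpenAI,
        Claude
    }
}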
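
The User and UserEntity changes are also outside the shown hunks; on each model the addition is presumably a single enum-typed property, roughly as below (a sketch; the Auto default value is an assumption):

using static Managing.Common.Enums;

namespace Managing.Domain.Users;

public partial class User
{
    // New in this commit: the user's preferred LLM provider (default value assumed).
    public LlmProvider DefaultLlmProvider { get; set; } = LlmProvider.Auto;
}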
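
The dependency-injection wiring mentioned above is likewise not shown. A hedged sketch of what the bootstrap registration presumably boils down to; LlmService, McpService, and LlmOptions are placeholder names, and the scoped lifetime is an assumption:

// Program.cs (application bootstrap) — hypothetical type names and lifetimes.
var builder = WebApplication.CreateBuilder(args);

builder.Services.Configure<LlmOptions>(builder.Configuration.GetSection("Llm")); // binds the "Llm" block added to appsettings below
builder.Services.AddScoped<ILlmService, LlmService>();   // provider selection and BYOK handling
builder.Services.AddScoped<IMcpService, McpService>();   // MCP tool discovery and execution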
commit 6f55566db3 (parent fb49190346)
Date: 2026-01-03 21:55:55 +07:00
46 changed files with 7900 additions and 3 deletions

@@ -0,0 +1,162 @@
using Managing.Application.Abstractions.Services;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Mvc;

namespace Managing.Api.Controllers;

/// <summary>
/// Controller for LLM (Large Language Model) operations with MCP tool calling support.
/// Provides endpoints for chat interactions with automatic provider selection and BYOK (Bring Your Own Key) support.
/// </summary>
[ApiController]
[Authorize]
[Route("[controller]")]
[Produces("application/json")]
public class LlmController : BaseController
{
    private readonly ILlmService _llmService;
    private readonly IMcpService _mcpService;
    private readonly ILogger<LlmController> _logger;

    public LlmController(
        ILlmService llmService,
        IMcpService mcpService,
        IUserService userService,
        ILogger<LlmController> logger) : base(userService)
    {
        _llmService = llmService;
        _mcpService = mcpService;
        _logger = logger;
    }

    /// <summary>
    /// Sends a chat message to an LLM with automatic provider selection and MCP tool calling support.
    /// Supports both auto mode (backend selects provider) and BYOK (user provides API key).
    /// </summary>
    /// <param name="request">The chat request with messages and optional provider/API key</param>
    /// <returns>The LLM response with tool calls if applicable</returns>
    [HttpPost]
    [Route("Chat")]
    public async Task<ActionResult<LlmChatResponse>> Chat([FromBody] LlmChatRequest request)
    {
        if (request == null)
        {
            return BadRequest("Chat request is required");
        }

        if (request.Messages == null || !request.Messages.Any())
        {
            return BadRequest("At least one message is required");
        }

        try
        {
            var user = await GetUser();

            // Get available MCP tools
            var availableTools = await _mcpService.GetAvailableToolsAsync();
            request.Tools = availableTools.ToList();

            // Send chat request to LLM
            var response = await _llmService.ChatAsync(user, request);

            // If LLM wants to call tools, execute them and get final response
            if (response.RequiresToolExecution && response.ToolCalls?.Any() == true)
            {
                _logger.LogInformation("LLM requested {Count} tool calls for user {UserId}",
                    response.ToolCalls.Count, user.Id);

                // Execute all tool calls
                var toolResults = new List<LlmMessage>();
                foreach (var toolCall in response.ToolCalls)
                {
                    try
                    {
                        var toolResult = await _mcpService.ExecuteToolAsync(user, toolCall.Name, toolCall.Arguments);
                        toolResults.Add(new LlmMessage
                        {
                            Role = "tool",
                            Content = System.Text.Json.JsonSerializer.Serialize(toolResult),
                            ToolCallId = toolCall.Id
                        });

                        _logger.LogInformation("Successfully executed tool {ToolName} for user {UserId}",
                            toolCall.Name, user.Id);
                    }
                    catch (Exception ex)
                    {
                        _logger.LogError(ex, "Error executing tool {ToolName} for user {UserId}",
                            toolCall.Name, user.Id);

                        toolResults.Add(new LlmMessage
                        {
                            Role = "tool",
                            Content = $"Error executing tool: {ex.Message}",
                            ToolCallId = toolCall.Id
                        });
                    }
                }

                // Add assistant message with tool calls
                request.Messages.Add(new LlmMessage
                {
                    Role = "assistant",
                    Content = response.Content,
                    ToolCalls = response.ToolCalls
                });

                // Add tool results
                request.Messages.AddRange(toolResults);

                // Get final response from LLM
                var finalResponse = await _llmService.ChatAsync(user, request);
                return Ok(finalResponse);
            }

            return Ok(response);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error processing chat request for user");
            return StatusCode(500, $"Error processing chat request: {ex.Message}");
        }
    }

    /// <summary>
    /// Gets the list of available LLM providers configured on the backend.
    /// </summary>
    /// <returns>List of provider names</returns>
    [HttpGet]
    [Route("Providers")]
    public async Task<ActionResult<IEnumerable<string>>> GetProviders()
    {
        try
        {
            var providers = await _llmService.GetAvailableProvidersAsync();
            return Ok(providers);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error getting available providers");
            return StatusCode(500, $"Error getting available providers: {ex.Message}");
        }
    }

    /// <summary>
    /// Gets the list of available MCP tools that the LLM can call.
    /// </summary>
    /// <returns>List of MCP tools with their descriptions and parameters</returns>
    [HttpGet]
    [Route("Tools")]
    public async Task<ActionResult<IEnumerable<McpToolDefinition>>> GetTools()
    {
        try
        {
            var tools = await _mcpService.GetAvailableToolsAsync();
            return Ok(tools);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error getting available tools");
            return StatusCode(500, $"Error getting available tools: {ex.Message}");
        }
    }
}
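
The request and response contracts used by this controller are defined elsewhere in the commit; the following is a rough sketch of their shape, inferred only from how the controller uses them (class and property names beyond those visible above, and all types, are assumptions):

// Inferred from usage in LlmController — not the actual committed definitions.
public class LlmChatRequest
{
    public List<LlmMessage> Messages { get; set; } = new();
    public string? Provider { get; set; }                  // optional explicit provider (assumption)
    public string? ApiKey { get; set; }                    // optional BYOK key (assumption)
    public List<McpToolDefinition> Tools { get; set; } = new();
}

public class LlmMessage
{
    public string Role { get; set; } = "user";             // "user", "assistant", or "tool"
    public string? Content { get; set; }
    public string? ToolCallId { get; set; }                // set on tool-result messages
    public List<LlmToolCall>? ToolCalls { get; set; }
}

public class LlmChatResponse
{
    public string? Content { get; set; }
    public bool RequiresToolExecution { get; set; }
    public List<LlmToolCall>? ToolCalls { get; set; }
}

public class LlmToolCall
{
    public string Id { get; set; } = "";
    public string Name { get; set; } = "";
    public Dictionary<string, object?> Arguments { get; set; } = new();  // argument shape is an assumption
}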

@@ -7,6 +7,7 @@ using Managing.Domain.Users;
using MediatR;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Mvc;
using static Managing.Common.Enums;
namespace Managing.Api.Controllers;
@@ -115,6 +116,31 @@ public class UserController : BaseController
        return Ok(updatedUser);
    }

    /// <summary>
    /// Updates the default LLM provider for the current user.
    /// </summary>
    /// <param name="defaultLlmProvider">The new default LLM provider to set (e.g., "Auto", "Gemini", "OpenAI", "Claude").</param>
    /// <returns>The updated user with the new default LLM provider.</returns>
    [Authorize]
    [HttpPut("default-llm-provider")]
    public async Task<ActionResult<User>> UpdateDefaultLlmProvider([FromBody] string defaultLlmProvider)
    {
        if (string.IsNullOrWhiteSpace(defaultLlmProvider))
        {
            return BadRequest("Default LLM provider cannot be null or empty.");
        }

        // Parse string to enum (case-insensitive)
        if (!Enum.TryParse<LlmProvider>(defaultLlmProvider, ignoreCase: true, out var providerEnum))
        {
            return BadRequest($"Invalid LLM provider '{defaultLlmProvider}'. Valid providers are: Auto, Gemini, OpenAI, Claude");
        }

        var user = await GetUser();
        var updatedUser = await _userService.UpdateDefaultLlmProvider(user, providerEnum);
        return Ok(updatedUser);
    }
    /// <summary>
    /// Tests the Telegram channel configuration by sending a test message.
    /// </summary>
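
Because the new UpdateDefaultLlmProvider endpoint binds its parameter with [FromBody] string, callers must send a JSON string literal (for example "Gemini", including the quotes) rather than a JSON object. A minimal client-side sketch; the base address and the User route prefix are assumptions (mirroring LlmController's [Route("[controller]")] convention):

// Hypothetical caller — assumes a bearer token is already available.
using System.Net.Http.Headers;
using System.Text;

var client = new HttpClient { BaseAddress = new Uri("https://api.example.com/") };
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", "<jwt>");

var body = new StringContent("\"Gemini\"", Encoding.UTF8, "application/json");
var response = await client.PutAsync("User/default-llm-provider", body);
response.EnsureSuccessStatusCode();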

@@ -9,8 +9,6 @@
      }
    }
  },
  "InfluxDb": {
    "Organization": "managing-org"
  },
@@ -28,6 +26,17 @@
  "Flagsmith": {
    "ApiUrl": "https://flag.kaigen.ai/api/v1/"
  },
  "Llm": {
    "Gemini": {
      "DefaultModel": "gemini-2.0-flash"
    },
    "OpenAI": {
      "DefaultModel": "gpt-4o"
    },
    "Claude": {
      "DefaultModel": "claude-haiku-4-5-20251001"
    }
  },
  "N8n": {
  },
  "Sentry": {