Implement LLM provider configuration and update user settings

- Added a UserController endpoint for updating a user's default LLM provider (see the sketch after this list).
- Introduced an LlmProvider enum for the available options: Auto, Gemini, OpenAI, and Claude.
- Added a DefaultLlmProvider property to the User and UserEntity models.
- Extended the database context and migrations to persist the new setting.
- Registered the LLM services in the application bootstrap for dependency injection (a registration sketch follows the file below).
- Extended the TypeScript API client with methods for managing LLM providers and sending chat requests.
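
For context, a minimal sketch of the enum and endpoint described above, assuming an ASP.NET Core controller; the route, action name, and IUserService abstraction are illustrative assumptions, not this commit's exact code:

using Microsoft.AspNetCore.Mvc;

// The four options named above; Auto presumably lets the backend choose.
public enum LlmProvider { Auto, Gemini, OpenAI, Claude }

[ApiController]
[Route("api/users")]
public class UserController : ControllerBase
{
    private readonly IUserService _users; // hypothetical service abstraction

    public UserController(IUserService users) => _users = users;

    // Hypothetical route; the commit's actual action and route may differ.
    [HttpPut("me/default-llm-provider")]
    public async Task<IActionResult> UpdateDefaultLlmProvider([FromBody] LlmProvider provider)
    {
        // Persists to the user's DefaultLlmProvider via the service layer.
        await _users.SetDefaultLlmProviderAsync(User.Identity!.Name!, provider);
        return NoContent();
    }
}

// Hypothetical abstraction standing in for the real persistence path.
public interface IUserService
{
    Task SetDefaultLlmProviderAsync(string userName, LlmProvider provider);
}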
Date:   2026-01-03 21:55:55 +07:00
Parent: fb49190346
Commit: 6f55566db3
46 changed files with 7900 additions and 3 deletions

OpenAiProvider.cs (new file)

@@ -0,0 +1,199 @@
using System.Net.Http.Json;
using System.Text.Json;
using System.Text.Json.Serialization;
using Managing.Application.Abstractions.Services;
using Microsoft.Extensions.Logging;
namespace Managing.Application.LLM.Providers;
/// <summary>
/// OpenAI API provider
/// </summary>
public class OpenAiProvider : ILlmProvider
{
private readonly string _apiKey;
private readonly string _defaultModel;
private readonly HttpClient _httpClient;
private readonly ILogger _logger;
private const string BaseUrl = "https://api.openai.com/v1";
private const string FallbackModel = "gpt-4o";
public string Name => "openai";
public OpenAiProvider(string apiKey, string? defaultModel, IHttpClientFactory? httpClientFactory, ILogger logger)
{
_apiKey = apiKey;
_defaultModel = defaultModel ?? FallbackModel;
_httpClient = httpClientFactory?.CreateClient() ?? new HttpClient();
_httpClient.DefaultRequestHeaders.Add("Authorization", $"Bearer {_apiKey}");
_logger = logger;
}
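/// <summary>
/// Translates the provider-agnostic request (messages, tool calls/results,
/// temperature, max_tokens, function tools) into OpenAI's wire format,
/// POSTs it to /chat/completions, and maps the reply back to LlmChatResponse.
/// Throws HttpRequestException on a non-success status code.
/// </summary>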
public async Task<LlmChatResponse> ChatAsync(LlmChatRequest request)
{
var url = $"{BaseUrl}/chat/completions";
var openAiRequest = new
{
model = _defaultModel,
messages = request.Messages.Select(m => new
{
role = m.Role,
content = m.Content,
tool_calls = m.ToolCalls?.Select(tc => new
{
id = tc.Id,
type = "function",
function = new
{
name = tc.Name,
arguments = JsonSerializer.Serialize(tc.Arguments)
}
}),
tool_call_id = m.ToolCallId
}).ToArray(),
temperature = request.Temperature,
max_tokens = request.MaxTokens,
tools = request.Tools?.Any() == true ? request.Tools.Select(t => new
{
type = "function",
function = new
{
name = t.Name,
description = t.Description,
parameters = new
{
type = "object",
properties = t.Parameters.ToDictionary(
p => p.Key,
p => new
{
type = p.Value.Type,
description = p.Value.Description
}
),
required = t.Parameters.Where(p => p.Value.Required).Select(p => p.Key).ToArray()
}
}
}).ToArray() : null
};
var jsonOptions = new JsonSerializerOptions
{
PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
};
var response = await _httpClient.PostAsJsonAsync(url, openAiRequest, jsonOptions);
if (!response.IsSuccessStatusCode)
{
var errorContent = await response.Content.ReadAsStringAsync();
_logger.LogError("OpenAI API error: {StatusCode} - {Error}", response.StatusCode, errorContent);
throw new HttpRequestException($"OpenAI API error: {response.StatusCode} - {errorContent}");
}
var openAiResponse = await response.Content.ReadFromJsonAsync<OpenAiResponse>(jsonOptions)
    ?? throw new InvalidOperationException("OpenAI returned an empty response body.");
return ConvertFromOpenAiResponse(openAiResponse);
}
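/// <summary>
/// Maps the raw OpenAI payload onto LlmChatResponse: takes the first choice,
/// copies token usage when present, and surfaces any tool calls with
/// RequiresToolExecution set so the caller knows to run them.
/// </summary>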
private LlmChatResponse ConvertFromOpenAiResponse(OpenAiResponse response)
{
var choice = response.Choices?.FirstOrDefault();
if (choice == null)
{
return new LlmChatResponse
{
Content = "",
Provider = Name,
Model = response.Model ?? _defaultModel
};
}
var llmResponse = new LlmChatResponse
{
Content = choice.Message?.Content ?? "",
Provider = Name,
Model = response.Model ?? _defaultModel,
Usage = response.Usage != null ? new LlmUsage
{
PromptTokens = response.Usage.PromptTokens,
CompletionTokens = response.Usage.CompletionTokens,
TotalTokens = response.Usage.TotalTokens
} : null
};
if (choice.Message?.ToolCalls?.Any() == true)
{
llmResponse.ToolCalls = choice.Message.ToolCalls.Select(tc => new LlmToolCall
{
Id = tc.Id,
Name = tc.Function.Name,
Arguments = JsonSerializer.Deserialize<Dictionary<string, object>>(tc.Function.Arguments) ?? new()
}).ToList();
llmResponse.RequiresToolExecution = true;
}
return llmResponse;
}
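// Minimal DTOs mirroring only the fields of the OpenAI chat completions
// response that this provider reads; other fields are ignored on deserialization.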
private class OpenAiResponse
{
[JsonPropertyName("id")]
public string? Id { get; set; }
[JsonPropertyName("model")]
public string? Model { get; set; }
[JsonPropertyName("choices")]
public List<OpenAiChoice>? Choices { get; set; }
[JsonPropertyName("usage")]
public OpenAiUsage? Usage { get; set; }
}
private class OpenAiChoice
{
[JsonPropertyName("message")]
public OpenAiMessage? Message { get; set; }
}
private class OpenAiMessage
{
[JsonPropertyName("content")]
public string? Content { get; set; }
[JsonPropertyName("tool_calls")]
public List<OpenAiToolCall>? ToolCalls { get; set; }
}
private class OpenAiToolCall
{
[JsonPropertyName("id")]
public string Id { get; set; } = "";
[JsonPropertyName("function")]
public OpenAiFunction Function { get; set; } = new();
}
private class OpenAiFunction
{
[JsonPropertyName("name")]
public string Name { get; set; } = "";
[JsonPropertyName("arguments")]
public string Arguments { get; set; } = "{}";
}
private class OpenAiUsage
{
[JsonPropertyName("prompt_tokens")]
public int PromptTokens { get; set; }
[JsonPropertyName("completion_tokens")]
public int CompletionTokens { get; set; }
[JsonPropertyName("total_tokens")]
public int TotalTokens { get; set; }
}
}
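
The bootstrap wiring mentioned in the commit message is not part of this file; below is a minimal registration sketch, assuming configuration keys Llm:OpenAi:ApiKey and Llm:OpenAi:Model and a collection-style ILlmProvider registration (both assumptions, not this commit's actual code):

// Hypothetical bootstrap sketch (e.g. in Program.cs); the configuration
// key names and the ILlmProvider registration shape are assumed.
builder.Services.AddHttpClient();
builder.Services.AddSingleton<ILlmProvider>(sp =>
    new OpenAiProvider(
        apiKey: builder.Configuration["Llm:OpenAi:ApiKey"]
            ?? throw new InvalidOperationException("Missing OpenAI API key."),
        defaultModel: builder.Configuration["Llm:OpenAi:Model"], // null falls back to gpt-4o
        httpClientFactory: sp.GetRequiredService<IHttpClientFactory>(),
        logger: sp.GetRequiredService<ILogger<OpenAiProvider>>()));

A small resolver keyed on ILlmProvider.Name ("openai", and so on) could then pick the instance matching each user's DefaultLlmProvider, with Auto deferring to a server-side default.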