Enhance LLM chat streaming and progress updates
- Implemented SignalR integration for real-time chat streaming in LlmController, allowing for progress updates during LLM interactions.
- Refactored AiChat component to handle streaming responses and display progress updates, including iteration status and tool call results.
- Introduced a new ProgressIndicator component to visually represent the current state of chat processing.
- Updated AiChatService to manage SignalR connections and handle streaming updates effectively, improving user experience during chat sessions.
- Enhanced error handling and messaging for better feedback during chat interactions.
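The intended end-to-end flow, sketched below as a minimal TypeScript client (names such as `apiUrl`, `token`, and `streamChat` are illustrative; the `/llmhub` hub path, the `ProgressUpdate` event, the `Llm/ChatStream` route, and the `connectionId` field all come from the diff below): connect to the SignalR hub, subscribe to progress updates, then POST the chat request with the hub connection id.

```typescript
import { HubConnectionBuilder } from '@microsoft/signalr'

// Minimal sketch of the streaming flow; apiUrl/token handling is app-specific.
async function streamChat(apiUrl: string, token: string, messages: { role: string; content: string }[]) {
  // 1. Connect to the LLM hub; SignalR appends the token as ?access_token=...
  const connection = new HubConnectionBuilder()
    .withUrl(`${apiUrl}/llmhub`, { accessTokenFactory: () => token })
    .build()

  // 2. Render every progress update pushed by the server.
  connection.on('ProgressUpdate', (update: { type: string; message?: string }) => {
    console.log(`[${update.type}]`, update.message)
  })

  await connection.start()

  // 3. Start the stream; the controller returns immediately and pushes updates over SignalR.
  await fetch(`${apiUrl}/Llm/ChatStream`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
    body: JSON.stringify({ messages, provider: 'auto', connectionId: connection.connectionId })
  })
}
```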
@@ -1,9 +1,11 @@
 using System.Text.Json;
 using System.Text.RegularExpressions;
 using Managing.Application.Abstractions.Services;
+using Managing.Application.Hubs;
 using Managing.Domain.Users;
 using Microsoft.AspNetCore.Authorization;
 using Microsoft.AspNetCore.Mvc;
+using Microsoft.AspNetCore.SignalR;
 using Microsoft.Extensions.Caching.Memory;
 
 namespace Managing.Api.Controllers;
@@ -22,51 +24,50 @@ public class LlmController : BaseController
     private readonly IMcpService _mcpService;
     private readonly ILogger<LlmController> _logger;
     private readonly IMemoryCache _cache;
+    private readonly IHubContext<LlmHub> _hubContext;
 
     public LlmController(
         ILlmService llmService,
         IMcpService mcpService,
         IUserService userService,
         ILogger<LlmController> logger,
-        IMemoryCache cache) : base(userService)
+        IMemoryCache cache,
+        IHubContext<LlmHub> hubContext) : base(userService)
     {
         _llmService = llmService;
         _mcpService = mcpService;
         _logger = logger;
        _cache = cache;
+        _hubContext = hubContext;
     }
 
     /// <summary>
-    /// Sends a chat message to an LLM with streaming progress updates (Server-Sent Events).
+    /// Sends a chat message to an LLM with streaming progress updates via SignalR.
     /// Provides real-time updates about iterations, tool calls, and progress similar to Cursor/Claude.
+    /// Progress updates are sent via SignalR to the specified connectionId.
     /// </summary>
-    /// <param name="request">The chat request with messages and optional provider/API key</param>
-    /// <returns>Stream of progress updates ending with final response</returns>
+    /// <param name="request">The chat request with messages, optional provider/API key, and SignalR connectionId</param>
+    /// <returns>OK status - updates are sent via SignalR</returns>
     [HttpPost]
     [Route("ChatStream")]
-    [Produces("text/event-stream")]
-    public async IAsyncEnumerable<LlmProgressUpdate> ChatStream([FromBody] LlmChatRequest request)
+    [Consumes("application/json")]
+    [Produces("application/json")]
+    public async Task<ActionResult> ChatStream([FromBody] LlmChatStreamRequest request)
     {
-        if (request == null)
+        if (request == null || string.IsNullOrWhiteSpace(request.ConnectionId))
         {
-            yield return new LlmProgressUpdate
-            {
-                Type = "error",
-                Message = "Chat request is required",
-                Error = "Chat request is required"
-            };
-            yield break;
+            return BadRequest("Chat request and connectionId are required");
         }
 
         if (request.Messages == null || !request.Messages.Any())
         {
-            yield return new LlmProgressUpdate
+            await _hubContext.Clients.Client(request.ConnectionId).SendAsync("ProgressUpdate", new LlmProgressUpdate
             {
                 Type = "error",
                 Message = "At least one message is required",
                 Error = "At least one message is required"
-            };
-            yield break;
+            });
+            return BadRequest("At least one message is required");
         }
 
         User? user = null;
@@ -81,258 +82,285 @@ public class LlmController : BaseController
 
         if (user == null)
         {
-            yield return new LlmProgressUpdate
+            await _hubContext.Clients.Client(request.ConnectionId).SendAsync("ProgressUpdate", new LlmProgressUpdate
             {
                 Type = "error",
                 Message = "Error authenticating user",
                 Error = "Unable to authenticate user"
-            };
-            yield break;
+            });
+            return Unauthorized("Unable to authenticate user");
         }
 
-        await foreach (var update in ChatStreamInternal(request, user))
-        {
-            yield return update;
-        }
+        // Process in background to avoid blocking the HTTP response
+        _ = Task.Run(async () =>
+        {
+            try
+            {
+                await ChatStreamInternal(request, user, request.ConnectionId);
+            }
+            catch (Exception ex)
+            {
+                _logger.LogError(ex, "Error processing chat stream for connection {ConnectionId}", request.ConnectionId);
+                await _hubContext.Clients.Client(request.ConnectionId).SendAsync("ProgressUpdate", new LlmProgressUpdate
+                {
+                    Type = "error",
+                    Message = $"Error processing chat: {ex.Message}",
+                    Error = ex.Message
+                });
+            }
+        });
+
+        return Ok(new { Message = "Chat stream started", ConnectionId = request.ConnectionId });
     }
 
-    private async IAsyncEnumerable<LlmProgressUpdate> ChatStreamInternal(LlmChatRequest request, User user)
+    private async Task ChatStreamInternal(LlmChatStreamRequest request, User user, string connectionId)
     {
-        yield return new LlmProgressUpdate
+        // Convert to LlmChatRequest for service calls
+        var chatRequest = new LlmChatRequest
+        {
+            Messages = request.Messages,
+            Provider = request.Provider,
+            ApiKey = request.ApiKey,
+            Stream = request.Stream,
+            Temperature = request.Temperature,
+            MaxTokens = request.MaxTokens,
+            Tools = request.Tools
+        };
+
+        await SendProgressUpdate(connectionId, new LlmProgressUpdate
         {
             Type = "thinking",
             Message = "Initializing conversation and loading available tools..."
-        };
+        });
 
         // Get available MCP tools (with caching for 5 minutes)
         var availableTools = await _cache.GetOrCreateAsync("mcp_tools", async entry =>
         {
             entry.AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(5);
             return (await _mcpService.GetAvailableToolsAsync()).ToList();
         });
-        request.Tools = availableTools;
+        chatRequest.Tools = availableTools;
 
-        yield return new LlmProgressUpdate
+        await SendProgressUpdate(connectionId, new LlmProgressUpdate
         {
             Type = "thinking",
             Message = $"Loaded {availableTools.Count} available tools. Preparing system context..."
-        };
+        });
 
         // Add or prepend system message to ensure LLM knows it can respond directly
-        var existingSystemMessages = request.Messages.Where(m => m.Role == "system").ToList();
+        var existingSystemMessages = chatRequest.Messages.Where(m => m.Role == "system").ToList();
         foreach (var msg in existingSystemMessages)
        {
-            request.Messages.Remove(msg);
+            chatRequest.Messages.Remove(msg);
        }
 
         // Add explicit system message with domain expertise and tool guidance
         var systemMessage = new LlmMessage
         {
             Role = "system",
             Content = BuildSystemMessage()
         };
-        request.Messages.Insert(0, systemMessage);
+        chatRequest.Messages.Insert(0, systemMessage);
 
         // Proactively inject backtest details fetching if user is asking for analysis
-        await InjectBacktestDetailsFetchingIfNeeded(request, user);
+        await InjectBacktestDetailsFetchingIfNeeded(chatRequest, user);
 
         // Add helpful context extraction message if backtest ID was found
-        AddBacktestContextGuidance(request);
+        AddBacktestContextGuidance(chatRequest);
 
         // Iterative tool calling: keep looping until we get a final answer without tool calls
-        int maxIterations = DetermineMaxIterations(request);
+        int maxIterations = DetermineMaxIterations(chatRequest);
         int iteration = 0;
         LlmChatResponse? finalResponse = null;
         const int DelayBetweenIterationsMs = 500;
 
-        yield return new LlmProgressUpdate
+        await SendProgressUpdate(connectionId, new LlmProgressUpdate
         {
             Type = "thinking",
             Message = $"Starting analysis (up to {maxIterations} iterations may be needed)..."
-        };
+        });
 
         while (iteration < maxIterations)
         {
             iteration++;
 
-            yield return new LlmProgressUpdate
+            await SendProgressUpdate(connectionId, new LlmProgressUpdate
             {
                 Type = "iteration_start",
                 Message = $"Iteration {iteration}/{maxIterations}: Analyzing your request and determining next steps...",
                 Iteration = iteration,
                 MaxIterations = maxIterations
-            };
+            });
 
             _logger.LogInformation("LLM chat iteration {Iteration}/{MaxIterations} for user {UserId}",
                 iteration, maxIterations, user.Id);
 
             // Add delay between iterations to avoid rapid bursts and rate limiting
             if (iteration > 1)
             {
-                yield return new LlmProgressUpdate
+                await SendProgressUpdate(connectionId, new LlmProgressUpdate
                 {
                     Type = "thinking",
                     Message = "Waiting briefly to respect rate limits...",
                     Iteration = iteration,
                     MaxIterations = maxIterations
-                };
+                });
                 await Task.Delay(DelayBetweenIterationsMs);
             }
 
             // Trim context if conversation is getting too long
-            TrimConversationContext(request);
+            TrimConversationContext(chatRequest);
 
             // Send chat request to LLM
-            yield return new LlmProgressUpdate
+            await SendProgressUpdate(connectionId, new LlmProgressUpdate
             {
                 Type = "thinking",
                 Message = $"Iteration {iteration}: Sending request to LLM...",
                 Iteration = iteration,
                 MaxIterations = maxIterations
-            };
+            });
 
-            var response = await _llmService.ChatAsync(user, request);
+            var response = await _llmService.ChatAsync(user, chatRequest);
 
             // If LLM doesn't want to call tools, we have our final answer
             if (!response.RequiresToolExecution || response.ToolCalls == null || !response.ToolCalls.Any())
             {
                 finalResponse = response;
                 _logger.LogInformation("LLM provided final answer after {Iteration} iteration(s) for user {UserId}",
                     iteration, user.Id);
 
-                yield return new LlmProgressUpdate
+                await SendProgressUpdate(connectionId, new LlmProgressUpdate
                 {
                     Type = "thinking",
                     Message = "Received final response. Preparing answer...",
                     Iteration = iteration,
                     MaxIterations = maxIterations
-                };
+                });
 
                 break;
             }
 
             // LLM wants to call tools - execute them
             _logger.LogInformation("LLM requested {Count} tool calls in iteration {Iteration} for user {UserId}",
                 response.ToolCalls.Count, iteration, user.Id);
 
-            yield return new LlmProgressUpdate
+            await SendProgressUpdate(connectionId, new LlmProgressUpdate
             {
                 Type = "thinking",
                 Message = $"Iteration {iteration}: LLM requested {response.ToolCalls.Count} tool call(s). Executing tools...",
                 Iteration = iteration,
                 MaxIterations = maxIterations
-            };
+            });
 
             // Execute tool calls sequentially to allow progress updates
             var toolResults = new List<LlmMessage>();
             foreach (var toolCall in response.ToolCalls)
             {
-                yield return new LlmProgressUpdate
+                await SendProgressUpdate(connectionId, new LlmProgressUpdate
                 {
                     Type = "tool_call",
                     Message = $"Calling tool: {toolCall.Name}",
                     Iteration = iteration,
                     MaxIterations = maxIterations,
                     ToolName = toolCall.Name,
                     ToolArguments = toolCall.Arguments
-                };
+                });
 
                 var (success, result, error) = await ExecuteToolSafely(user, toolCall.Name, toolCall.Arguments, toolCall.Id, iteration, maxIterations);
 
                 if (success && result != null)
                 {
                     _logger.LogInformation("Successfully executed tool {ToolName} in iteration {Iteration} for user {UserId}",
                         toolCall.Name, iteration, user.Id);
 
-                    yield return new LlmProgressUpdate
+                    await SendProgressUpdate(connectionId, new LlmProgressUpdate
                     {
                         Type = "tool_result",
                         Message = $"Tool {toolCall.Name} completed successfully",
                         Iteration = iteration,
                         MaxIterations = maxIterations,
                         ToolName = toolCall.Name
-                    };
+                    });
 
                     toolResults.Add(new LlmMessage
                     {
                         Role = "tool",
                         Content = JsonSerializer.Serialize(result),
                         ToolCallId = toolCall.Id
                     });
                 }
                 else
                 {
                     _logger.LogError("Error executing tool {ToolName} in iteration {Iteration} for user {UserId}: {Error}",
                         toolCall.Name, iteration, user.Id, error);
 
-                    yield return new LlmProgressUpdate
+                    await SendProgressUpdate(connectionId, new LlmProgressUpdate
                     {
                         Type = "tool_result",
                         Message = $"Tool {toolCall.Name} encountered an error: {error}",
                         Iteration = iteration,
                         MaxIterations = maxIterations,
                         ToolName = toolCall.Name,
                         Error = error
-                    };
+                    });
 
                     toolResults.Add(new LlmMessage
                     {
                         Role = "tool",
                         Content = $"Error executing tool: {error}",
                         ToolCallId = toolCall.Id
                     });
                 }
             }
 
-            yield return new LlmProgressUpdate
+            await SendProgressUpdate(connectionId, new LlmProgressUpdate
             {
                 Type = "thinking",
                 Message = $"Iteration {iteration}: All tools completed. Analyzing results...",
                 Iteration = iteration,
                 MaxIterations = maxIterations
-            };
+            });
 
             // Add assistant message with tool calls to conversation history
-            request.Messages.Add(new LlmMessage
+            chatRequest.Messages.Add(new LlmMessage
             {
                 Role = "assistant",
                 Content = response.Content,
                 ToolCalls = response.ToolCalls
             });
 
             // Add tool results to conversation history
-            request.Messages.AddRange(toolResults);
+            chatRequest.Messages.AddRange(toolResults);
 
             // Continue loop to get LLM's response to the tool results
         }
 
         // If we hit max iterations, return the last response (even if it has tool calls)
         if (finalResponse == null)
         {
             _logger.LogWarning("Reached max iterations ({MaxIterations}) for user {UserId}. Returning last response.",
                 maxIterations, user.Id);
 
-            yield return new LlmProgressUpdate
+            await SendProgressUpdate(connectionId, new LlmProgressUpdate
             {
                 Type = "thinking",
                 Message = "Reached maximum iterations. Getting final response...",
                 Iteration = maxIterations,
                 MaxIterations = maxIterations
-            };
+            });
 
-            finalResponse = await _llmService.ChatAsync(user, request);
+            finalResponse = await _llmService.ChatAsync(user, chatRequest);
        }
 
         // Send final response
-        yield return new LlmProgressUpdate
+        await SendProgressUpdate(connectionId, new LlmProgressUpdate
         {
             Type = "final_response",
             Message = "Analysis complete!",
             Response = finalResponse,
             Iteration = iteration,
             MaxIterations = maxIterations
-        };
+        });
     }
 
     /// <summary>
@@ -919,4 +947,30 @@
         _logger.LogWarning("No backtest ID found in conversation messages");
         return null;
     }
 
+    /// <summary>
+    /// Helper method to send progress update via SignalR
+    /// </summary>
+    private async Task SendProgressUpdate(string connectionId, LlmProgressUpdate update)
+    {
+        try
+        {
+            await _hubContext.Clients.Client(connectionId).SendAsync("ProgressUpdate", update);
+        }
+        catch (Exception ex)
+        {
+            _logger.LogError(ex, "Error sending progress update to connection {ConnectionId}", connectionId);
+        }
+    }
+}
+
+/// <summary>
+/// Request model for LLM chat streaming via SignalR
+/// </summary>
+public class LlmChatStreamRequest : LlmChatRequest
+{
+    /// <summary>
+    /// SignalR connection ID to send progress updates to
+    /// </summary>
+    public string ConnectionId { get; set; } = string.Empty;
 }
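For reference, the payload pushed by `SendProgressUpdate` above maps onto roughly this client-side shape (a hand-written approximation inferred from the controller and component code in this commit; the real `LlmProgressUpdate` type is generated from the API):

```typescript
// Approximate shape of LlmProgressUpdate as consumed by the client (inferred, not the generated type).
interface LlmProgressUpdateShape {
  type: 'thinking' | 'iteration_start' | 'tool_call' | 'tool_result' | 'final_response' | 'error'
  message?: string
  iteration?: number
  maxIterations?: number
  toolName?: string
  toolArguments?: Record<string, unknown>
  response?: unknown // the LlmChatResponse carried by a final_response update
  error?: string
}
```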
@@ -306,14 +306,26 @@ builder.Services
            OnMessageReceived = context =>
            {
                // Skip token extraction for anonymous endpoints to avoid validation errors
-                var path = context.Request.Path.Value?.ToLower();
-                if (path != null && (path.EndsWith("/create-token") || path.EndsWith("/authenticate")))
+                var path = context.Request.Path.Value?.ToLower() ?? "";
+                if (!string.IsNullOrEmpty(path) && (path.EndsWith("/create-token") || path.EndsWith("/authenticate")))
                {
                    // Clear any token to prevent validation on anonymous endpoints
                    context.Token = null;
                    return Task.CompletedTask;
                }
 
+                // Extract token from query string for SignalR connections
+                // SignalR uses access_token query parameter for WebSocket connections
+                if (path.Contains("/bothub") || path.Contains("/backtesthub") || path.Contains("/llmhub"))
+                {
+                    var accessToken = context.Request.Query["access_token"];
+                    if (!string.IsNullOrEmpty(accessToken))
+                    {
+                        context.Token = accessToken;
+                        return Task.CompletedTask;
+                    }
+                }
+
                // Handle tokens sent without "Bearer " prefix for authenticated endpoints
                // The standard middleware expects "Bearer <token>" but some clients send just the token
                if (string.IsNullOrEmpty(context.Token))
@@ -330,13 +342,6 @@ builder.Services
                    }
                }
 
-                // If you want to get the token from a custom header or query string
-                // var accessToken = context.Request.Query["access_token"];
-                // if (!string.IsNullOrEmpty(accessToken) &&
-                //     context.HttpContext.Request.Path.StartsWithSegments("/hub"))
-                // {
-                //     context.Token = accessToken;
-                // }
                return Task.CompletedTask;
            },
            OnAuthenticationFailed = context =>
@@ -438,7 +443,8 @@ builder.Services.AddCors(options =>
        policy
            .WithOrigins(allowedCorsOrigins)
            .WithMethods("GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH")
-            .WithHeaders("Content-Type", "Authorization", "X-Requested-With", "X-Correlation-ID")
+            .WithHeaders("Content-Type", "Authorization", "X-Requested-With", "X-Correlation-ID",
+                "X-SignalR-User-Agent", "x-requested-with", "x-signalr-user-agent") // SignalR-specific headers
            .WithExposedHeaders("Token-Expired", "X-Correlation-ID")
            .AllowCredentials()
            .SetPreflightMaxAge(TimeSpan.FromHours(24));
@@ -449,7 +455,8 @@ builder.Services.AddCors(options =>
        policy
            .AllowAnyMethod()
            .AllowAnyHeader()
-            .SetIsOriginAllowed(_ => true);
+            .SetIsOriginAllowed(_ => true)
+            .AllowCredentials();
    }
    });
});
@@ -528,13 +535,16 @@ app.Use(async (context, next) =>
    context.Response.Headers.Append("Referrer-Policy", "strict-origin-when-cross-origin");
    context.Response.Headers.Append("Permissions-Policy", "geolocation=(), microphone=(), camera=()");
 
-    // Content Security Policy - only for non-Swagger endpoints
+    // Content Security Policy - exclude SignalR hubs and other special endpoints
    if (!context.Request.Path.StartsWithSegments("/swagger") &&
        !context.Request.Path.StartsWithSegments("/health") &&
-        !context.Request.Path.StartsWithSegments("/alive"))
+        !context.Request.Path.StartsWithSegments("/alive") &&
+        !context.Request.Path.StartsWithSegments("/bothub") &&
+        !context.Request.Path.StartsWithSegments("/backtesthub") &&
+        !context.Request.Path.StartsWithSegments("/llmhub"))
    {
        context.Response.Headers.Append("Content-Security-Policy",
-            "default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:;");
+            "default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:; connect-src 'self' ws: wss:;");
    }
 
    // Remove server header (optional - Kestrel can be configured separately)
@@ -577,6 +587,7 @@ app.UseEndpoints(endpoints =>
    endpoints.MapControllers();
    endpoints.MapHub<BotHub>("/bothub");
    endpoints.MapHub<BacktestHub>("/backtesthub");
+    endpoints.MapHub<LlmHub>("/llmhub");
 
    endpoints.MapHealthChecks("/health", new HealthCheckOptions
    {
src/Managing.Application/Hubs/LlmHub.cs (new file, 20 lines)
@@ -0,0 +1,20 @@
+using Microsoft.AspNetCore.Authorization;
+using Microsoft.AspNetCore.SignalR;
+
+namespace Managing.Application.Hubs;
+
+/// <summary>
+/// SignalR hub for LLM chat streaming with real-time progress updates
+/// </summary>
+[Authorize]
+public class LlmHub : Hub
+{
+    public override async Task OnConnectedAsync()
+    {
+        await base.OnConnectedAsync();
+        await Clients.Caller.SendAsync("Connected", "Connected to LlmHub!");
+    }
+
+    public string GetConnectionId() => Context.ConnectionId;
+}
+
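Because `connection.connectionId` can be null until the handshake completes, a client could instead ask the hub for its id via the `GetConnectionId` method shown above; a small sketch:

```typescript
import { HubConnection } from '@microsoft/signalr'

// Sketch: fetch the server-side connection id by invoking the hub method shown above.
async function getServerConnectionId(connection: HubConnection): Promise<string> {
  return await connection.invoke<string>('GetConnectionId')
}
```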
@@ -1,13 +1,17 @@
 import { useState, useRef, useEffect } from 'react'
 import { LlmClient } from '../../generated/ManagingApi'
-import { LlmMessage, LlmChatResponse } from '../../generated/ManagingApiTypes'
+import { LlmMessage, LlmChatResponse, LlmProgressUpdate } from '../../generated/ManagingApiTypes'
 import { AiChatService } from '../../services/aiChatService'
 import useApiUrlStore from '../../app/store/apiStore'
 
 interface Message {
-  role: 'user' | 'assistant' | 'system'
+  role: 'user' | 'assistant' | 'system' | 'progress'
   content: string
   timestamp: Date
+  progressType?: string
+  iteration?: number
+  maxIterations?: number
+  toolName?: string
 }
 
 interface AiChatProps {
@@ -26,12 +30,13 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
   const [isLoading, setIsLoading] = useState(false)
   const [provider, setProvider] = useState<string>('auto')
   const [availableProviders, setAvailableProviders] = useState<string[]>([])
+  const [currentProgress, setCurrentProgress] = useState<LlmProgressUpdate | null>(null)
   const messagesEndRef = useRef<HTMLDivElement>(null)
   const { apiUrl, userToken } = useApiUrlStore()
 
   useEffect(() => {
     scrollToBottom()
-  }, [messages])
+  }, [messages, currentProgress])
 
   useEffect(() => {
     loadProviders()
@@ -44,7 +49,7 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
   const loadProviders = async () => {
     try {
       const llmClient = new LlmClient({}, apiUrl)
-      const service = new AiChatService(llmClient)
+      const service = new AiChatService(llmClient, apiUrl)
       const providers = await service.getProviders()
       setAvailableProviders(['auto', ...providers])
     } catch (error) {
@@ -64,14 +69,15 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
     setMessages(prev => [...prev, userMessage])
     setInput('')
     setIsLoading(true)
+    setCurrentProgress(null)
 
     try {
       const llmClient = new LlmClient({}, apiUrl)
-      const service = new AiChatService(llmClient)
+      const service = new AiChatService(llmClient, apiUrl)
 
       // Convert messages to LlmMessage format
       const llmMessages: LlmMessage[] = messages
-        .filter(m => m.role !== 'system' || messages.indexOf(m) === 0) // Include only first system message
+        .filter(m => m.role !== 'system' && m.role !== 'progress') // Exclude system and progress messages
         .map(m => ({
           role: m.role,
           content: m.content,
@@ -87,18 +93,66 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
           toolCallId: undefined
         })
 
-      const response: LlmChatResponse = await service.sendMessage(
+      // Use streaming
+      let finalResponse: LlmChatResponse | null = null
+      let lastUpdate: LlmProgressUpdate | null = null
+
+      for await (const update of service.sendMessageStream(
         llmMessages,
         provider === 'auto' ? undefined : provider
-      )
+      )) {
+        lastUpdate = update
+        setCurrentProgress(update)
 
-      const assistantMessage: Message = {
-        role: 'assistant',
-        content: response.content || 'No response from AI',
-        timestamp: new Date()
+        // Handle different update types
+        if (update.type === 'error') {
+          const errorMessage: Message = {
+            role: 'assistant',
+            content: `Error: ${update.error || update.message || 'Failed to get response from AI'}`,
+            timestamp: new Date()
+          }
+          setMessages(prev => [...prev, errorMessage])
+          break
+        }
+
+        if (update.type === 'final_response' && update.response) {
+          finalResponse = update.response
+        }
       }
 
-      setMessages(prev => [...prev, assistantMessage])
+      // Add final response if we got one
+      if (finalResponse) {
+        const assistantMessage: Message = {
+          role: 'assistant',
+          content: finalResponse.content || 'No response from AI',
+          timestamp: new Date()
+        }
+        setMessages(prev => [...prev, assistantMessage])
+      } else if (lastUpdate && lastUpdate.type === 'final_response' && lastUpdate.response) {
+        // Fallback: check lastUpdate in case finalResponse wasn't set
+        const assistantMessage: Message = {
+          role: 'assistant',
+          content: lastUpdate.response.content || 'No response from AI',
+          timestamp: new Date()
+        }
+        setMessages(prev => [...prev, assistantMessage])
+      } else if (lastUpdate && lastUpdate.type === 'error') {
+        // Show error message
+        const errorMessage: Message = {
+          role: 'assistant',
+          content: `Error: ${lastUpdate.error || lastUpdate.message || 'Failed to get response from AI'}`,
+          timestamp: new Date()
+        }
+        setMessages(prev => [...prev, errorMessage])
+      } else {
+        // If we didn't get a final response, show the last progress message
+        const assistantMessage: Message = {
+          role: 'assistant',
+          content: lastUpdate?.message || 'Response incomplete',
+          timestamp: new Date()
+        }
+        setMessages(prev => [...prev, assistantMessage])
+      }
     } catch (error: any) {
       console.error('Error sending message:', error)
       const errorMessage: Message = {
@@ -109,6 +163,7 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
       setMessages(prev => [...prev, errorMessage])
     } finally {
       setIsLoading(false)
+      setCurrentProgress(null)
     }
   }
 
@@ -179,7 +234,17 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
           </div>
         </div>
       ))}
-      {isLoading && (
+
+      {/* Progress Updates */}
+      {isLoading && currentProgress && (
+        <div className="flex justify-start">
+          <div className="bg-base-200 p-3 rounded-lg max-w-[80%]">
+            <ProgressIndicator progress={currentProgress} />
+          </div>
+        </div>
+      )}
+
+      {isLoading && !currentProgress && (
         <div className="flex justify-start">
           <div className="bg-base-200 p-3 rounded-lg">
             <div className="flex gap-1">
@@ -221,4 +286,80 @@ function AiChat({ onClose }: AiChatProps): JSX.Element {
   )
 }
 
+// Progress Indicator Component
+function ProgressIndicator({ progress }: { progress: LlmProgressUpdate }): JSX.Element {
+  const getIcon = () => {
+    switch (progress.type) {
+      case 'iteration_start':
+        return '🔄'
+      case 'thinking':
+        return '💭'
+      case 'tool_call':
+        return '🔧'
+      case 'tool_result':
+        return progress.error ? '❌' : '✅'
+      case 'final_response':
+        return '✨'
+      case 'error':
+        return '⚠️'
+      default:
+        return '⏳'
+    }
+  }
+
+  const getColor = () => {
+    switch (progress.type) {
+      case 'error':
+        return 'text-error'
+      case 'tool_result':
+        return progress.error ? 'text-error' : 'text-success'
+      case 'final_response':
+        return 'text-success'
+      default:
+        return 'text-info'
+    }
+  }
+
+  return (
+    <div className="space-y-2">
+      <div className="flex items-center gap-2">
+        <span className="text-lg">{getIcon()}</span>
+        <span className={`text-sm font-medium ${getColor()}`}>
+          {progress.message}
+        </span>
+      </div>
+
+      {progress.iteration && progress.maxIterations && (
+        <div className="flex items-center gap-2 text-xs text-base-content/60">
+          <progress
+            className="progress progress-primary w-32 h-2"
+            value={progress.iteration}
+            max={progress.maxIterations}
+          />
+          <span>Iteration {progress.iteration}/{progress.maxIterations}</span>
+        </div>
+      )}
+
+      {progress.toolName && (
+        <div className="text-xs text-base-content/60 mt-1">
+          <span className="font-mono bg-base-300 px-2 py-1 rounded">
+            {progress.toolName}
+            {progress.toolArguments && Object.keys(progress.toolArguments).length > 0 && (
+              <span className="ml-1 opacity-60">
+                ({Object.keys(progress.toolArguments).length} args)
+              </span>
+            )}
+          </span>
+        </div>
+      )}
+
+      {progress.error && (
+        <div className="text-xs text-error mt-1">
+          {progress.error}
+        </div>
+      )}
+    </div>
+  )
+}
+
 export default AiChat
@@ -1,11 +1,115 @@
+import { HubConnection, HubConnectionBuilder } from '@microsoft/signalr'
 import { LlmClient } from '../generated/ManagingApi'
-import { LlmChatRequest, LlmChatResponse, LlmMessage } from '../generated/ManagingApiTypes'
+import { LlmChatRequest, LlmChatResponse, LlmMessage, LlmProgressUpdate } from '../generated/ManagingApiTypes'
+import { Cookies } from 'react-cookie'
 
 export class AiChatService {
   private llmClient: LlmClient
+  private baseUrl: string
+  private hubConnection: HubConnection | null = null
 
-  constructor(llmClient: LlmClient) {
+  constructor(llmClient: LlmClient, baseUrl: string) {
     this.llmClient = llmClient
+    this.baseUrl = baseUrl
+  }
+
+  /**
+   * Creates and connects to SignalR hub for LLM chat streaming
+   */
+  async connectToHub(): Promise<HubConnection> {
+    if (this.hubConnection?.state === 'Connected') {
+      return this.hubConnection
+    }
+
+    // Clean up existing connection if any
+    if (this.hubConnection) {
+      try {
+        await this.hubConnection.stop()
+      } catch (e) {
+        // Ignore stop errors
+      }
+      this.hubConnection = null
+    }
+
+    const cookies = new Cookies()
+    const bearerToken = cookies.get('token')
+
+    if (!bearerToken) {
+      throw new Error('No authentication token found. Please log in first.')
+    }
+
+    // Ensure baseUrl doesn't have trailing slash
+    const baseUrl = this.baseUrl.endsWith('/') ? this.baseUrl.slice(0, -1) : this.baseUrl
+    const hubUrl = `${baseUrl}/llmhub`
+
+    console.log('Connecting to SignalR hub:', hubUrl)
+
+    const connection = new HubConnectionBuilder()
+      .withUrl(hubUrl, {
+        // Pass token via query string (standard for SignalR WebSocket connections)
+        // SignalR will add this as ?access_token=xxx to the negotiation request
+        accessTokenFactory: () => {
+          const token = cookies.get('token')
+          if (!token) {
+            console.error('Token not available in accessTokenFactory')
+            throw new Error('Token expired or not available')
+          }
+          console.log('Providing token for SignalR connection')
+          return token
+        }
+      })
+      .withAutomaticReconnect({
+        nextRetryDelayInMilliseconds: (retryContext) => {
+          // Exponential backoff: 0s, 2s, 10s, 30s
+          if (retryContext.previousRetryCount === 0) return 2000
+          if (retryContext.previousRetryCount === 1) return 10000
+          return 30000
+        }
+      })
+      .build()
+
+    // Add connection event handlers for debugging
+    connection.onclose((error) => {
+      console.log('SignalR connection closed', error)
+      this.hubConnection = null
+    })
+
+    connection.onreconnecting((error) => {
+      console.log('SignalR reconnecting', error)
+    })
+
+    connection.onreconnected((connectionId) => {
+      console.log('SignalR reconnected', connectionId)
+    })
+
+    try {
+      console.log('Starting SignalR connection...')
+      await connection.start()
+      console.log('SignalR connected successfully. Connection ID:', connection.connectionId)
+      this.hubConnection = connection
+      return connection
+    } catch (error: any) {
+      console.error('Failed to connect to SignalR hub:', error)
+      console.error('Error details:', {
+        message: error?.message,
+        stack: error?.stack,
+        hubUrl: hubUrl,
+        hasToken: !!bearerToken
+      })
+      // Clean up on failure
+      this.hubConnection = null
+      throw new Error(`Failed to connect to SignalR hub: ${error?.message || 'Unknown error'}. Check browser console for details.`)
+    }
+  }
+
+  /**
+   * Disconnects from SignalR hub
+   */
+  async disconnectFromHub(): Promise<void> {
+    if (this.hubConnection) {
+      await this.hubConnection.stop()
+      this.hubConnection = null
+    }
   }
 
   /**
@@ -25,6 +129,122 @@ export class AiChatService {
     return await this.llmClient.llm_Chat(request)
   }
 
+  /**
+   * Send a chat message with streaming progress updates via SignalR
+   * Returns an async generator that yields progress updates in real-time
+   */
+  async *sendMessageStream(
+    messages: LlmMessage[],
+    provider?: string,
+    apiKey?: string
+  ): AsyncGenerator<LlmProgressUpdate, void, unknown> {
+    // Connect to SignalR hub
+    const connection = await this.connectToHub()
+    const connectionId = connection.connectionId
+
+    if (!connectionId) {
+      yield {
+        type: 'error',
+        message: 'Failed to get SignalR connection ID',
+        error: 'Connection ID not available'
+      }
+      return
+    }
+
+    const request = {
+      messages,
+      provider: provider || 'auto',
+      apiKey: apiKey,
+      stream: true,
+      temperature: 0.7,
+      maxTokens: 4096,
+      tools: undefined, // Will be populated by backend
+      connectionId: connectionId
+    }
+
+    // Queue for incoming updates
+    const updateQueue: LlmProgressUpdate[] = []
+    let isComplete = false
+    let resolver: ((update: LlmProgressUpdate) => void) | null = null
+
+    // Set up progress update handler
+    const handler = (update: LlmProgressUpdate) => {
+      if (resolver) {
+        resolver(update)
+        resolver = null
+      } else {
+        updateQueue.push(update)
+      }
+
+      if (update.type === 'final_response' || update.type === 'error') {
+        isComplete = true
+      }
+    }
+
+    connection.on('ProgressUpdate', handler)
+
+    try {
+      // Send chat request to backend
+      const cookies = new Cookies()
+      const bearerToken = cookies.get('token')
+
+      const response = await fetch(`${this.baseUrl}/Llm/ChatStream`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+          ...(bearerToken ? { Authorization: `Bearer ${bearerToken}` } : {})
+        },
+        body: JSON.stringify(request)
+      })
+
+      if (!response.ok) {
+        const errorText = await response.text()
+        yield {
+          type: 'error',
+          message: `HTTP ${response.status}: ${errorText}`,
+          error: errorText
+        }
+        return
+      }
+
+      // Yield updates as they arrive via SignalR
+      while (!isComplete) {
+        // Check if we have queued updates
+        if (updateQueue.length > 0) {
+          const update = updateQueue.shift()!
+          yield update
+          if (update.type === 'final_response' || update.type === 'error') {
+            break
+          }
+        } else {
+          // Wait for next update
+          const update = await new Promise<LlmProgressUpdate>((resolve) => {
+            resolver = resolve
+          })
+          yield update
+          if (update.type === 'final_response' || update.type === 'error') {
+            break
+          }
+        }
+      }
+
+      // Yield any remaining queued updates
+      while (updateQueue.length > 0) {
+        yield updateQueue.shift()!
+      }
+    } catch (e) {
+      const error = e instanceof Error ? e : new Error(String(e))
+      yield {
+        type: 'error',
+        message: `Error: ${error.message}`,
+        error: error.message
+      }
+    } finally {
+      // Clean up handler
+      connection.off('ProgressUpdate', handler)
+    }
+  }
+
   /**
    * Get available LLM providers
    */