Implement LLM provider configuration and update user settings
- Added functionality to update the default LLM provider for users via a new endpoint in UserController.
- Introduced LlmProvider enum to manage available LLM options: Auto, Gemini, OpenAI, and Claude.
- Updated User and UserEntity models to include DefaultLlmProvider property.
- Enhanced database context and migrations to support the new LLM provider configuration.
- Integrated LLM services into the application bootstrap for dependency injection.
- Updated TypeScript API client to include methods for managing LLM providers and chat requests.
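For reference, a minimal sketch of calling the new endpoint through the regenerated client. `UserClient`, `user_UpdateDefaultLlmProvider`, and `LlmProvider` all appear in the generated code below; the import paths and the way the client is constructed (empty config object plus base URL) mirror how this commit constructs `LlmClient` elsewhere, and are otherwise illustrative:

// Usage sketch only; import paths and apiUrl are placeholders.
import { UserClient } from './generated/ManagingApi'
import { LlmProvider } from './generated/ManagingApiTypes'

async function setDefaultProvider(apiUrl: string): Promise<void> {
  const userClient = new UserClient({}, apiUrl)
  // Issues PUT /User/default-llm-provider with the provider name as the JSON body
  const user = await userClient.user_UpdateDefaultLlmProvider(LlmProvider.Claude)
  console.log('Default LLM provider:', user.defaultLlmProvider)
}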
src/Managing.WebApp/src/App.tsx
@@ -1,4 +1,5 @@
import { Auth } from '../pages/authPage/auth'
import AiChatButton from '../components/organism/AiChatButton'

import MyRoutes from './routes'

@@ -6,6 +7,7 @@ const App = () => {
  return (
    <Auth>
      <MyRoutes />
      <AiChatButton />
    </Auth>
  )
}
224  src/Managing.WebApp/src/components/organism/AiChat.tsx  Normal file
@@ -0,0 +1,224 @@
import { useState, useRef, useEffect } from 'react'
import { LlmClient } from '../../generated/ManagingApi'
import { LlmMessage, LlmChatResponse } from '../../generated/ManagingApiTypes'
import { AiChatService } from '../../services/aiChatService'
import useApiUrlStore from '../../app/store/apiStore'

interface Message {
  role: 'user' | 'assistant' | 'system'
  content: string
  timestamp: Date
}

interface AiChatProps {
  onClose?: () => void
}

function AiChat({ onClose }: AiChatProps): JSX.Element {
  const [messages, setMessages] = useState<Message[]>([
    {
      role: 'system',
      content: 'You are a helpful AI assistant for the Managing trading platform. You can help users query their backtests, analyze trading strategies, and provide insights.',
      timestamp: new Date()
    }
  ])
  const [input, setInput] = useState('')
  const [isLoading, setIsLoading] = useState(false)
  const [provider, setProvider] = useState<string>('auto')
  const [availableProviders, setAvailableProviders] = useState<string[]>([])
  const messagesEndRef = useRef<HTMLDivElement>(null)
  const { apiUrl, userToken } = useApiUrlStore()

  useEffect(() => {
    scrollToBottom()
  }, [messages])

  useEffect(() => {
    loadProviders()
  }, [])

  const scrollToBottom = () => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
  }

  const loadProviders = async () => {
    try {
      const llmClient = new LlmClient({}, apiUrl)
      const service = new AiChatService(llmClient)
      const providers = await service.getProviders()
      setAvailableProviders(['auto', ...providers])
    } catch (error) {
      console.error('Failed to load providers:', error)
    }
  }

  const sendMessage = async () => {
    if (!input.trim() || isLoading) return

    const userMessage: Message = {
      role: 'user',
      content: input,
      timestamp: new Date()
    }

    setMessages(prev => [...prev, userMessage])
    setInput('')
    setIsLoading(true)

    try {
      const llmClient = new LlmClient({}, apiUrl)
      const service = new AiChatService(llmClient)

      // Convert messages to LlmMessage format
      const llmMessages: LlmMessage[] = messages
        .filter(m => m.role !== 'system' || messages.indexOf(m) === 0) // Include only first system message
        .map(m => ({
          role: m.role,
          content: m.content,
          toolCalls: undefined,
          toolCallId: undefined
        }))

      // Add the new user message
      llmMessages.push({
        role: 'user',
        content: input,
        toolCalls: undefined,
        toolCallId: undefined
      })

      const response: LlmChatResponse = await service.sendMessage(
        llmMessages,
        provider === 'auto' ? undefined : provider
      )

      const assistantMessage: Message = {
        role: 'assistant',
        content: response.content || 'No response from AI',
        timestamp: new Date()
      }

      setMessages(prev => [...prev, assistantMessage])
    } catch (error: any) {
      console.error('Error sending message:', error)
      const errorMessage: Message = {
        role: 'assistant',
        content: `Error: ${error?.message || 'Failed to get response from AI'}`,
        timestamp: new Date()
      }
      setMessages(prev => [...prev, errorMessage])
    } finally {
      setIsLoading(false)
    }
  }

  const handleKeyPress = (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault()
      sendMessage()
    }
  }

  return (
    <div className="flex flex-col h-full bg-base-100">
      {/* Header */}
      <div className="flex items-center justify-between p-4 border-b border-base-300">
        <div className="flex items-center gap-3">
          <div className="w-8 h-8 bg-primary rounded-full flex items-center justify-center">
            <svg className="w-5 h-5 text-primary-content" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9.663 17h4.673M12 3v1m6.364 1.636l-.707.707M21 12h-1M4 12H3m3.343-5.657l-.707-.707m2.828 9.9a5 5 0 117.072 0l-.548.547A3.374 3.374 0 0014 18.469V19a2 2 0 11-4 0v-.531c0-.895-.356-1.754-.988-2.386l-.548-.547z" />
            </svg>
          </div>
          <div>
            <h2 className="font-bold text-lg">AI Assistant</h2>
            <p className="text-sm text-base-content/60">Powered by MCP</p>
          </div>
        </div>
        <div className="flex items-center gap-2">
          {/* Provider Selection */}
          <select
            value={provider}
            onChange={(e) => setProvider(e.target.value)}
            className="select select-sm select-bordered"
            disabled={isLoading}
          >
            {availableProviders.map(p => (
              <option key={p} value={p}>
                {p === 'auto' ? 'Auto (Backend Selects)' : p.charAt(0).toUpperCase() + p.slice(1)}
              </option>
            ))}
          </select>
          {onClose && (
            <button onClick={onClose} className="btn btn-sm btn-ghost btn-circle">
              <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
              </svg>
            </button>
          )}
        </div>
      </div>

      {/* Messages */}
      <div className="flex-1 overflow-y-auto p-4 space-y-4">
        {messages.filter(m => m.role !== 'system').map((message, index) => (
          <div
            key={index}
            className={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`}
          >
            <div
              className={`max-w-[80%] p-3 rounded-lg ${
                message.role === 'user'
                  ? 'bg-primary text-primary-content'
                  : 'bg-base-200 text-base-content'
              }`}
            >
              <p className="whitespace-pre-wrap break-words">{message.content}</p>
              <p className="text-xs opacity-60 mt-1">
                {message.timestamp.toLocaleTimeString()}
              </p>
            </div>
          </div>
        ))}
        {isLoading && (
          <div className="flex justify-start">
            <div className="bg-base-200 p-3 rounded-lg">
              <div className="flex gap-1">
                <span className="loading loading-dots loading-sm"></span>
              </div>
            </div>
          </div>
        )}
        <div ref={messagesEndRef} />
      </div>

      {/* Input */}
      <div className="p-4 border-t border-base-300">
        <div className="flex gap-2">
          <textarea
            value={input}
            onChange={(e) => setInput(e.target.value)}
            onKeyPress={handleKeyPress}
            placeholder="Ask me anything about your backtests..."
            className="textarea textarea-bordered flex-1 resize-none"
            rows={2}
            disabled={isLoading}
          />
          <button
            onClick={sendMessage}
            disabled={isLoading || !input.trim()}
            className="btn btn-primary"
          >
            <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 19l9 2-9-18-9 18 9-2zm0 0v-8" />
            </svg>
          </button>
        </div>
        <p className="text-xs text-base-content/60 mt-2">
          Press Enter to send, Shift+Enter for new line
        </p>
      </div>
    </div>
  )
}

export default AiChat
32  src/Managing.WebApp/src/components/organism/AiChatButton.tsx  Normal file
@@ -0,0 +1,32 @@
import { useState } from 'react'
import AiChat from './AiChat'

function AiChatButton(): JSX.Element {
  const [isOpen, setIsOpen] = useState(false)

  return (
    <>
      {/* Floating Chat Button */}
      {!isOpen && (
        <button
          onClick={() => setIsOpen(true)}
          className="fixed bottom-6 right-6 btn btn-circle btn-primary btn-lg shadow-lg z-50 hover:scale-110 transition-transform"
          aria-label="Open AI Chat"
        >
          <svg className="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
            <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M8 10h.01M12 10h.01M16 10h.01M9 16H5a2 2 0 01-2-2V6a2 2 0 012-2h14a2 2 0 012 2v8a2 2 0 01-2 2h-5l-5 5v-5z" />
          </svg>
        </button>
      )}

      {/* Chat Window */}
      {isOpen && (
        <div className="fixed bottom-6 right-6 w-[400px] h-[600px] bg-base-100 rounded-lg shadow-2xl z-50 border border-base-300 flex flex-col overflow-hidden">
          <AiChat onClose={() => setIsOpen(false)} />
        </div>
      )}
    </>
  )
}

export default AiChatButton
src/Managing.WebApp/src/generated/ManagingApi.ts
@@ -2899,6 +2899,127 @@ export class JobClient extends AuthorizedApiBase {
    }
}

export class LlmClient extends AuthorizedApiBase {
    private http: { fetch(url: RequestInfo, init?: RequestInit): Promise<Response> };
    private baseUrl: string;
    protected jsonParseReviver: ((key: string, value: any) => any) | undefined = undefined;

    constructor(configuration: IConfig, baseUrl?: string, http?: { fetch(url: RequestInfo, init?: RequestInit): Promise<Response> }) {
        super(configuration);
        this.http = http ? http : window as any;
        this.baseUrl = baseUrl ?? "http://localhost:5000";
    }

    llm_Chat(request: LlmChatRequest): Promise<LlmChatResponse> {
        let url_ = this.baseUrl + "/Llm/Chat";
        url_ = url_.replace(/[?&]$/, "");

        const content_ = JSON.stringify(request);

        let options_: RequestInit = {
            body: content_,
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Accept": "application/json"
            }
        };

        return this.transformOptions(options_).then(transformedOptions_ => {
            return this.http.fetch(url_, transformedOptions_);
        }).then((_response: Response) => {
            return this.processLlm_Chat(_response);
        });
    }

    protected processLlm_Chat(response: Response): Promise<LlmChatResponse> {
        const status = response.status;
        let _headers: any = {}; if (response.headers && response.headers.forEach) { response.headers.forEach((v: any, k: any) => _headers[k] = v); };
        if (status === 200) {
            return response.text().then((_responseText) => {
                let result200: any = null;
                result200 = _responseText === "" ? null : JSON.parse(_responseText, this.jsonParseReviver) as LlmChatResponse;
                return result200;
            });
        } else if (status !== 200 && status !== 204) {
            return response.text().then((_responseText) => {
                return throwException("An unexpected server error occurred.", status, _responseText, _headers);
            });
        }
        return Promise.resolve<LlmChatResponse>(null as any);
    }

    llm_GetProviders(): Promise<string[]> {
        let url_ = this.baseUrl + "/Llm/Providers";
        url_ = url_.replace(/[?&]$/, "");

        let options_: RequestInit = {
            method: "GET",
            headers: {
                "Accept": "application/json"
            }
        };

        return this.transformOptions(options_).then(transformedOptions_ => {
            return this.http.fetch(url_, transformedOptions_);
        }).then((_response: Response) => {
            return this.processLlm_GetProviders(_response);
        });
    }

    protected processLlm_GetProviders(response: Response): Promise<string[]> {
        const status = response.status;
        let _headers: any = {}; if (response.headers && response.headers.forEach) { response.headers.forEach((v: any, k: any) => _headers[k] = v); };
        if (status === 200) {
            return response.text().then((_responseText) => {
                let result200: any = null;
                result200 = _responseText === "" ? null : JSON.parse(_responseText, this.jsonParseReviver) as string[];
                return result200;
            });
        } else if (status !== 200 && status !== 204) {
            return response.text().then((_responseText) => {
                return throwException("An unexpected server error occurred.", status, _responseText, _headers);
            });
        }
        return Promise.resolve<string[]>(null as any);
    }

    llm_GetTools(): Promise<McpToolDefinition[]> {
        let url_ = this.baseUrl + "/Llm/Tools";
        url_ = url_.replace(/[?&]$/, "");

        let options_: RequestInit = {
            method: "GET",
            headers: {
                "Accept": "application/json"
            }
        };

        return this.transformOptions(options_).then(transformedOptions_ => {
            return this.http.fetch(url_, transformedOptions_);
        }).then((_response: Response) => {
            return this.processLlm_GetTools(_response);
        });
    }

    protected processLlm_GetTools(response: Response): Promise<McpToolDefinition[]> {
        const status = response.status;
        let _headers: any = {}; if (response.headers && response.headers.forEach) { response.headers.forEach((v: any, k: any) => _headers[k] = v); };
        if (status === 200) {
            return response.text().then((_responseText) => {
                let result200: any = null;
                result200 = _responseText === "" ? null : JSON.parse(_responseText, this.jsonParseReviver) as McpToolDefinition[];
                return result200;
            });
        } else if (status !== 200 && status !== 204) {
            return response.text().then((_responseText) => {
                return throwException("An unexpected server error occurred.", status, _responseText, _headers);
            });
        }
        return Promise.resolve<McpToolDefinition[]>(null as any);
    }
}

export class MoneyManagementClient extends AuthorizedApiBase {
    private http: { fetch(url: RequestInfo, init?: RequestInit): Promise<Response> };
    private baseUrl: string;
@@ -4388,6 +4509,45 @@ export class UserClient extends AuthorizedApiBase {
        return Promise.resolve<User>(null as any);
    }

    user_UpdateDefaultLlmProvider(defaultLlmProvider: string): Promise<User> {
        let url_ = this.baseUrl + "/User/default-llm-provider";
        url_ = url_.replace(/[?&]$/, "");

        const content_ = JSON.stringify(defaultLlmProvider);

        let options_: RequestInit = {
            body: content_,
            method: "PUT",
            headers: {
                "Content-Type": "application/json",
                "Accept": "application/json"
            }
        };

        return this.transformOptions(options_).then(transformedOptions_ => {
            return this.http.fetch(url_, transformedOptions_);
        }).then((_response: Response) => {
            return this.processUser_UpdateDefaultLlmProvider(_response);
        });
    }

    protected processUser_UpdateDefaultLlmProvider(response: Response): Promise<User> {
        const status = response.status;
        let _headers: any = {}; if (response.headers && response.headers.forEach) { response.headers.forEach((v: any, k: any) => _headers[k] = v); };
        if (status === 200) {
            return response.text().then((_responseText) => {
                let result200: any = null;
                result200 = _responseText === "" ? null : JSON.parse(_responseText, this.jsonParseReviver) as User;
                return result200;
            });
        } else if (status !== 200 && status !== 204) {
            return response.text().then((_responseText) => {
                return throwException("An unexpected server error occurred.", status, _responseText, _headers);
            });
        }
        return Promise.resolve<User>(null as any);
    }

    user_TestTelegramChannel(): Promise<string> {
        let url_ = this.baseUrl + "/User/telegram-channel/test";
        url_ = url_.replace(/[?&]$/, "");
@@ -4690,6 +4850,7 @@ export interface User {
    signalAgreementThreshold?: number | null;
    allowSignalTrendOverride?: boolean | null;
    defaultExchange?: TradingExchanges | null;
    defaultLlmProvider?: LlmProvider | null;
}

export enum Confidence {
@@ -4699,6 +4860,13 @@ export enum Confidence {
    None = "None",
}

export enum LlmProvider {
    Auto = "Auto",
    Gemini = "Gemini",
    OpenAI = "OpenAI",
    Claude = "Claude",
}

export interface Balance {
    tokenImage?: string | null;
    tokenName?: string | null;
@@ -6064,6 +6232,57 @@ export interface JobStatusTypeSummary {
    count?: number;
}

export interface LlmChatResponse {
    content?: string;
    provider?: string;
    model?: string;
    toolCalls?: LlmToolCall[] | null;
    usage?: LlmUsage | null;
    requiresToolExecution?: boolean;
}

export interface LlmToolCall {
    id?: string;
    name?: string;
    arguments?: { [key: string]: any; };
}

export interface LlmUsage {
    promptTokens?: number;
    completionTokens?: number;
    totalTokens?: number;
}

export interface LlmChatRequest {
    messages?: LlmMessage[];
    provider?: string | null;
    apiKey?: string | null;
    stream?: boolean;
    temperature?: number;
    maxTokens?: number;
    tools?: McpToolDefinition[] | null;
}

export interface LlmMessage {
    role?: string;
    content?: string;
    toolCalls?: LlmToolCall[] | null;
    toolCallId?: string | null;
}

export interface McpToolDefinition {
    name?: string;
    description?: string;
    parameters?: { [key: string]: McpParameterDefinition; };
}

export interface McpParameterDefinition {
    type?: string;
    description?: string;
    required?: boolean;
    defaultValue?: any | null;
}

export interface ScenarioViewModel {
    name: string;
    indicators: IndicatorViewModel[];
src/Managing.WebApp/src/generated/ManagingApiTypes.ts
@@ -61,6 +61,7 @@ export interface User {
    signalAgreementThreshold?: number | null;
    allowSignalTrendOverride?: boolean | null;
    defaultExchange?: TradingExchanges | null;
    defaultLlmProvider?: LlmProvider | null;
}

export enum Confidence {
@@ -70,6 +71,13 @@ export enum Confidence {
    None = "None",
}

export enum LlmProvider {
    Auto = "Auto",
    Gemini = "Gemini",
    OpenAI = "OpenAI",
    Claude = "Claude",
}

export interface Balance {
    tokenImage?: string | null;
    tokenName?: string | null;
@@ -1435,6 +1443,57 @@ export interface JobStatusTypeSummary {
    count?: number;
}

export interface LlmChatResponse {
    content?: string;
    provider?: string;
    model?: string;
    toolCalls?: LlmToolCall[] | null;
    usage?: LlmUsage | null;
    requiresToolExecution?: boolean;
}

export interface LlmToolCall {
    id?: string;
    name?: string;
    arguments?: { [key: string]: any; };
}

export interface LlmUsage {
    promptTokens?: number;
    completionTokens?: number;
    totalTokens?: number;
}

export interface LlmChatRequest {
    messages?: LlmMessage[];
    provider?: string | null;
    apiKey?: string | null;
    stream?: boolean;
    temperature?: number;
    maxTokens?: number;
    tools?: McpToolDefinition[] | null;
}

export interface LlmMessage {
    role?: string;
    content?: string;
    toolCalls?: LlmToolCall[] | null;
    toolCallId?: string | null;
}

export interface McpToolDefinition {
    name?: string;
    description?: string;
    parameters?: { [key: string]: McpParameterDefinition; };
}

export interface McpParameterDefinition {
    type?: string;
    description?: string;
    required?: boolean;
    defaultValue?: any | null;
}

export interface ScenarioViewModel {
    name: string;
    indicators: IndicatorViewModel[];
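The `requiresToolExecution` flag and `toolCallId` field in these contracts suggest a client-driven MCP loop: run each requested tool, append the results, and call the chat endpoint again. A hedged sketch of that loop follows; the `executeTool` helper and the 'tool' role string are assumptions about the intended flow, not something this commit defines:

// Hypothetical tool-execution loop; 'executeTool' and the 'tool' role are
// assumptions about the MCP flow, not part of this commit.
import { LlmClient } from '../generated/ManagingApi'
import { LlmChatResponse, LlmMessage } from '../generated/ManagingApiTypes'

async function chatWithTools(
  client: LlmClient,
  messages: LlmMessage[],
  executeTool: (name: string, args: { [key: string]: any }) => Promise<string>
): Promise<LlmChatResponse> {
  let response = await client.llm_Chat({ messages })
  // Keep resolving tool calls until the model returns plain content
  while (response.requiresToolExecution && response.toolCalls?.length) {
    messages.push({ role: 'assistant', content: response.content, toolCalls: response.toolCalls })
    for (const call of response.toolCalls) {
      const result = await executeTool(call.name ?? '', call.arguments ?? {})
      messages.push({ role: 'tool', content: result, toolCallId: call.id })
    }
    response = await client.llm_Chat({ messages })
  }
  return response
}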
43  src/Managing.WebApp/src/services/aiChatService.ts  Normal file
@@ -0,0 +1,43 @@
import { LlmClient } from '../generated/ManagingApi'
import { LlmChatRequest, LlmChatResponse, LlmMessage } from '../generated/ManagingApiTypes'

export class AiChatService {
  private llmClient: LlmClient

  constructor(llmClient: LlmClient) {
    this.llmClient = llmClient
  }

  /**
   * Send a chat message to the AI with MCP tool calling support
   */
  async sendMessage(messages: LlmMessage[], provider?: string, apiKey?: string): Promise<LlmChatResponse> {
    const request: LlmChatRequest = {
      messages,
      provider: provider || 'auto',
      apiKey: apiKey,
      stream: false,
      temperature: 0.7,
      maxTokens: 4096,
      tools: undefined // Will be populated by backend
    }

    return await this.llmClient.llm_Chat(request)
  }

  /**
   * Get available LLM providers
   */
  async getProviders(): Promise<string[]> {
    return await this.llmClient.llm_GetProviders()
  }

  /**
   * Get available MCP tools
   */
  async getTools(): Promise<any[]> {
    return await this.llmClient.llm_GetTools()
  }
}

export default AiChatService
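Composed with the generated client, the service can also be exercised outside the React component. A minimal sketch; the base URL matches the generated client's default and otherwise stands in for the real API host:

// Usage sketch only; the base URL is a placeholder.
import { LlmClient } from '../generated/ManagingApi'
import { AiChatService } from './aiChatService'

async function demo(): Promise<void> {
  const service = new AiChatService(new LlmClient({}, 'http://localhost:5000'))
  const reply = await service.sendMessage(
    [{ role: 'user', content: 'Summarize my latest backtest.' }],
    'Claude' // omit to fall back to 'auto' and let the backend choose
  )
  console.log(reply.provider, reply.model, reply.content)
}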