Add .gitignore
This commit is contained in:
@@ -1,4 +1,5 @@
|
||||
import json
|
||||
from typing import Generator
|
||||
|
||||
from mistralai.client import Mistral
|
||||
from . import config
|
||||
@@ -13,8 +14,12 @@ def reset_history() -> None:
|
||||
_history.clear()
|
||||
|
||||
|
||||
def chat(user_message: str) -> str:
|
||||
"""Envoie un message au LLM, gère les appels d'outils MCP et retourne la réponse."""
|
||||
def chat_stream(user_message: str) -> Generator[str, None, None]:
|
||||
"""Génère la réponse du LLM token par token via streaming.
|
||||
|
||||
Les appels d'outils MCP sont exécutés internement (sans streaming).
|
||||
Seule la réponse textuelle finale est streamée sous forme de chunks.
|
||||
"""
|
||||
from . import mcp_client
|
||||
|
||||
_history.append({"role": "user", "content": user_message})
|
||||
@@ -24,20 +29,30 @@ def chat(user_message: str) -> str:
|
||||
|
||||
while True:
|
||||
messages = [{"role": "system", "content": config.SYSTEM_PROMPT}] + _history
|
||||
|
||||
kwargs: dict = {"model": config.LLM_MODEL, "messages": messages}
|
||||
if tools:
|
||||
kwargs["tools"] = tools
|
||||
|
||||
response = _client.chat.complete(**kwargs)
|
||||
choice = response.choices[0]
|
||||
msg = choice.message
|
||||
accumulated_content = ""
|
||||
tool_calls_received = None
|
||||
|
||||
if msg.tool_calls:
|
||||
# 1. Ajouter le message assistant (avec les appels d'outils) à l'historique
|
||||
for event in _client.chat.stream(**kwargs):
|
||||
ch = event.data.choices[0]
|
||||
delta = ch.delta
|
||||
|
||||
# Yield text chunks (isinstance check guards against Unset sentinel)
|
||||
if isinstance(delta.content, str) and delta.content:
|
||||
accumulated_content += delta.content
|
||||
yield delta.content
|
||||
|
||||
if delta.tool_calls:
|
||||
tool_calls_received = delta.tool_calls
|
||||
|
||||
if tool_calls_received:
|
||||
# Append assistant turn with tool calls to history
|
||||
_history.append({
|
||||
"role": "assistant",
|
||||
"content": msg.content or "",
|
||||
"content": accumulated_content or "",
|
||||
"tool_calls": [
|
||||
{
|
||||
"id": tc.id,
|
||||
@@ -47,12 +62,12 @@ def chat(user_message: str) -> str:
|
||||
"arguments": tc.function.arguments,
|
||||
},
|
||||
}
|
||||
for tc in msg.tool_calls
|
||||
for tc in tool_calls_received
|
||||
],
|
||||
})
|
||||
|
||||
# 2. Exécuter chaque outil et ajouter les résultats
|
||||
for tc in msg.tool_calls:
|
||||
# Execute each tool and append results
|
||||
for tc in tool_calls_received:
|
||||
tool_name = tc.function.name
|
||||
try:
|
||||
args = (
|
||||
@@ -60,7 +75,7 @@ def chat(user_message: str) -> str:
|
||||
if isinstance(tc.function.arguments, str)
|
||||
else tc.function.arguments
|
||||
)
|
||||
print(f" 🔧 [MCP] {tool_name}({_short_args(args)})")
|
||||
print(f"\n 🔧 [MCP] {tool_name}({_short_args(args)})", flush=True)
|
||||
result = manager.call_tool(tool_name, args)
|
||||
except Exception as e:
|
||||
result = f"Erreur lors de l'appel à {tool_name} : {e}"
|
||||
@@ -70,13 +85,17 @@ def chat(user_message: str) -> str:
|
||||
"content": result,
|
||||
"tool_call_id": tc.id,
|
||||
})
|
||||
|
||||
# 3. Reboucler pour obtenir la réponse finale
|
||||
# Loop to get the next (final) response
|
||||
|
||||
else:
|
||||
reply = msg.content or ""
|
||||
_history.append({"role": "assistant", "content": reply})
|
||||
return reply
|
||||
# Pure text response — already yielded chunk by chunk; save to history
|
||||
_history.append({"role": "assistant", "content": accumulated_content})
|
||||
break
|
||||
|
||||
|
||||
def chat(user_message: str) -> str:
    """Send a message to the LLM and return the complete response (non-streaming).

    Thin convenience wrapper: it drains the ``chat_stream`` generator and
    concatenates every yielded chunk into one string, so callers that do
    not care about incremental output get the full reply in a single call.
    """
    chunks = chat_stream(user_message)
    return "".join(chunks)
|
||||
|
||||
|
||||
def _short_args(args: dict) -> str:
|
||||
|
||||
Reference in New Issue
Block a user