feat: create Chainlit app for using the agent.
Browse files- chainlit_ui.py +54 -0
- src/axiom/agent.py +5 -3
chainlit_ui.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import chainlit as cl
|
2 |
+
|
3 |
+
from agents.mcp import MCPServerStdio
|
4 |
+
from src.axiom.agent import AxiomAgent
|
5 |
+
|
6 |
+
@cl.on_chat_start
async def on_chat_start():
    """Initialize a chat session: start MCP servers and create the agent.

    Launches two stdio MCP servers (Context7 documentation and sequential
    thinking), stores an ``AxiomAgent`` wired to both in the user session
    under ``"axiom_agent"``, and seeds an empty ``"chat_history"`` list.
    If either server fails to start, reports the error to the UI and
    leaves the session without an agent.
    """
    server1 = MCPServerStdio(
        name="Documentation MCP",
        params={
            "command": "npx",
            "args": ['-y', '@upstash/context7-mcp@latest']
        },
    )
    server2 = MCPServerStdio(
        name="Sequential Thinking MCP",
        params={
            "command": "npx",
            "args": ['-y', '@modelcontextprotocol/server-sequential-thinking']
        },
    )
    try:
        # Manually enter the async context so the servers stay alive for
        # the whole session (no enclosing `async with` is possible in a
        # Chainlit lifecycle hook).
        await server1.__aenter__()
        await server2.__aenter__()
    except Exception as e:
        # BUG FIX: `send()` is a coroutine; the original never awaited it,
        # so the failure message was silently dropped (coroutine-never-
        # awaited warning) and the user saw nothing.
        await cl.Message(content=f"Failed to start MCP Server: {e}").send()
        return

    agent = AxiomAgent(mcp_servers=[server1, server2])
    cl.user_session.set("axiom_agent", agent)
    cl.user_session.set("chat_history", [])
|
34 |
+
|
35 |
+
@cl.on_message
async def on_message(message: cl.Message):
    """Handle one user message: stream the agent's reply into the UI.

    Appends the user turn to the session chat history, streams the agent's
    response token-by-token, then records the completed assistant turn so
    the next call sees the full conversation.
    """
    agent = cl.user_session.get("axiom_agent")
    if agent is None:
        # Robustness fix: `on_chat_start` returns early (without setting
        # the agent) when an MCP server fails to start; without this guard
        # the call below raises `AttributeError` on None.
        await cl.Message(
            content="Agent is not available. Please restart the chat."
        ).send()
        return

    # History may be missing for the same reason; fall back to a fresh list.
    chat_history = cl.user_session.get("chat_history") or []

    # Record the user turn before invoking the agent.
    chat_history.append({"role": "user", "content": message.content})
    cl.user_session.set("chat_history", chat_history)

    response_generator = agent.stream_agent(chat_history)

    full_response = ""
    msg = cl.Message(content="")  # empty message, filled by streaming below

    async for token in response_generator:
        full_response += token
        await msg.stream_token(token)  # push each token to the UI as it arrives
    await msg.send()  # finalize the streamed message

    # Record the assistant turn so later messages have full context.
    chat_history.append({"role": "assistant", "content": full_response})
    cl.user_session.set("chat_history", chat_history)
|
src/axiom/agent.py
CHANGED
@@ -22,14 +22,16 @@ class AxiomAgent:
|
|
22 |
model: Optional[str] = None,
|
23 |
tools: Optional[list[Tool]] = None,
|
24 |
mcp_servers: Optional[list[MCPServer]] = None,
|
|
|
|
|
25 |
):
|
26 |
-
self._api_key = settings.GOOGLE_API_KEY
|
27 |
-
self.base_url = settings.BASE_URL
|
28 |
self.model_name = model or settings.DEFAULT_MODEL
|
29 |
|
30 |
self._client: AsyncOpenAI = AsyncOpenAI(
|
31 |
api_key=self._api_key,
|
32 |
-
base_url=self.
|
33 |
)
|
34 |
|
35 |
self.agent = Agent(
|
|
|
22 |
model: Optional[str] = None,
|
23 |
tools: Optional[list[Tool]] = None,
|
24 |
mcp_servers: Optional[list[MCPServer]] = None,
|
25 |
+
api_key: Optional[str] = None,
|
26 |
+
base_url: Optional[str] = None,
|
27 |
):
|
28 |
+
self._api_key = api_key or settings.GOOGLE_API_KEY
|
29 |
+
self.base_url = base_url or settings.BASE_URL
|
30 |
self.model_name = model or settings.DEFAULT_MODEL
|
31 |
|
32 |
self._client: AsyncOpenAI = AsyncOpenAI(
|
33 |
api_key=self._api_key,
|
34 |
+
base_url=self.base_url,
|
35 |
)
|
36 |
|
37 |
self.agent = Agent(
|