Update configuration settings for the Axiom 2.0 Agent; add AxiomAgent class to construct and run the agent
- src/axiom/agent.py +70 -0
- src/axiom/config.py +4 -5
- src/axiom/prompts.py +2 -0
src/axiom/agent.py
ADDED
@@ -0,0 +1,70 @@
+from openai import AsyncOpenAI
+from openai.types.responses import ResponseTextDeltaEvent
+
+from agents import (
+    Agent,
+    OpenAIChatCompletionsModel,
+    RunConfig,
+    Runner,
+    Tool,
+)
+from agents.mcp import MCPServer
+
+from .config import settings
+from .prompts import AXIOM_AGENT_PROMPT
+
+from collections.abc import AsyncGenerator
+
+class AxiomAgent:
+    def __init__(
+        self,
+        model: str | None = None,
+        tools: list[Tool] | None = None,
+        mcp_servers: list[MCPServer] | None = None,
+    ):
+        self._api_key = settings.GOOGLE_API_KEY
+        self.base_url = settings.BASE_URL
+        self.model = model if model else settings.DEFAULT_MODEL  # fall back to the configured default
+
+        self.agent = Agent(
+            name="Axiom 2.0",
+            instructions=AXIOM_AGENT_PROMPT,
+            mcp_servers=mcp_servers or [],  # Agent expects lists, not None
+            tools=tools or [],
+        )
+
+    def _get_model_config(self):
+        # Point an OpenAI-compatible client at the Gemini endpoint and wrap it as a chat-completions model
+        client = AsyncOpenAI(
+            api_key=self._api_key,
+            base_url=self.base_url,
+        )
+        model = OpenAIChatCompletionsModel(model=self.model, openai_client=client)
+        return RunConfig(
+            model=model,
+            model_provider=client,
+            tracing_disabled=True,
+        )
+
+    async def run_agent(self, input: str | list[dict[str, str]]):
+        config = self._get_model_config()
+
+        result = await Runner.run(
+            starting_agent=self.agent,
+            input=input,
+            run_config=config,
+        )
+        return result.final_output
+
+    async def stream_agent(self, input: str | list[dict[str, str]]) -> AsyncGenerator:
+        config = self._get_model_config()
+
+        result = Runner.run_streamed(  # run_streamed returns a streaming result synchronously
+            starting_agent=self.agent,
+            input=input,
+            run_config=config,
+        )
+        async for event in result.stream_events():
+            if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
+                if token := event.data.delta or "":
+                    yield token
src/axiom/config.py
CHANGED
@@ -12,8 +12,9 @@ class Settings(BaseSettings):
     """
     Configuration settings for the Axiom 2.0 Agent.
     """
-
-
+    GOOGLE_API_KEY: str
+    DEFAULT_MODEL: str = "gemini-2.0-flash"
+    BASE_URL: str = "https://generativelanguage.googleapis.com/v1beta/openai/"
 
     AVAILABLE_MODELS: list[str] = [
         "gemini-2.0-flash",
@@ -21,10 +22,8 @@ class Settings(BaseSettings):
         "gemini-2.0-flash-thinking-exp-1219",
         "gemini-2.5-pro-exp-03-25",
         "gemini-2.5-flash-preview-04-17",
-
     ]
-
-
+
     MAX_DOCS_TOKEN_LIMIT: int = 20000  # Maximum tokens to retrieve from the documentations
 
 settings = Settings()
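Only the changed region of config.py appears above. For context, a sketch of how the full Settings class presumably fits together, assuming pydantic-settings with values loaded from the environment or a .env file; the class name and BaseSettings base come from the hunk header, while the rest of the layout is an assumption:

    # config.py sketch -- only the lines shown in the diff are confirmed
    from pydantic_settings import BaseSettings, SettingsConfigDict

    class Settings(BaseSettings):
        """
        Configuration settings for the Axiom 2.0 Agent.
        """
        GOOGLE_API_KEY: str  # required; validation fails at startup if it is missing
        DEFAULT_MODEL: str = "gemini-2.0-flash"
        BASE_URL: str = "https://generativelanguage.googleapis.com/v1beta/openai/"

        AVAILABLE_MODELS: list[str] = [
            "gemini-2.0-flash",
            "gemini-2.5-pro-exp-03-25",
        ]  # abbreviated

        MAX_DOCS_TOKEN_LIMIT: int = 20000

        model_config = SettingsConfigDict(env_file=".env")  # assumption: values come from a .env file

    settings = Settings()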
src/axiom/prompts.py
ADDED
@@ -0,0 +1,2 @@
+AXIOM_AGENT_PROMPT = """
+"""