# ssandy_agents/react_agent.py
from basic_tools import *
from langgraph.prebuilt import create_react_agent
from utils import *
from langchain_core.messages import SystemMessage, HumanMessage

# Initial system message
system_message = SystemMessage(
    content="You are a helpful assistant. You are free to use the available tools and give back a proper answer.")


def main(search_query: str = "What is the capital of France?") -> None:
    # Initialize the LLM (loaded from the LM Studio server running on localhost:1234)
    llm = get_llm(provider="openai_local")
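    # A minimal sketch of what get_llm(provider="openai_local") presumably returns:
    # a ChatOpenAI client pointed at LM Studio's OpenAI-compatible endpoint. The
    # model name and api_key below are placeholder assumptions, not values from utils.
    # from langchain_openai import ChatOpenAI
    # llm = ChatOpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio",
    #                  model="local-model")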
    if llm:
        web_search_tools = [
            multiply, add, subtract, divide, modulus,
            wiki_search, web_search, arxiv_search,
            python_repl, analyze_image,
            date_filter, analyze_content,
            step_by_step_reasoning, translate_text,
        ]
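        # Hedged sketch: the tools above are assumed to be LangChain tools defined
        # in basic_tools, roughly along these lines (illustrative only, not the
        # actual implementation):
        # from langchain_core.tools import tool
        #
        # @tool
        # def multiply(a: float, b: float) -> float:
        #     """Multiply two numbers."""
        #     return a * b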
        # Create a langgraph ReAct agent with the LLM and tools. The system
        # message is supplied in the input payload below rather than bound to
        # the model.
        web_search_agent = create_react_agent(
            name="Web Search Agent",
            model=llm,
            tools=web_search_tools,
            response_format={
                "title": "SearchResults",
                "description": "Structured JSON object with search results",
                "type": "object",
                "properties": {
                    "results": {
                        "type": "array",
                        "items": {"type": "string"}
                    }
                },
                "required": ["results"]
            }
        )
        # Provide a complete conversation history containing both a system and an
        # initial user message so the agent starts from a valid first user turn.
        # The input must be a dict with a "messages" key; the entries themselves
        # may be either role/content dicts or message objects.
        # input_payload = {
        #     "messages": [
        #         {"role": "system", "content": system_message.content},
        #         {"role": "user", "content": f"{search_query}"}
        #     ]
        # }
        input_payload = {"messages": [
            system_message, HumanMessage(content=f"{search_query}")]}
        results = web_search_agent.invoke(input_payload)
        print(results)
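        # If the local model honored response_format, langgraph's prebuilt agent
        # typically places the parsed output under the "structured_response" key
        # of the returned state (a hedged sketch of how it could be read out):
        # structured = results.get("structured_response")
        # print(structured)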


if __name__ == "__main__":
    main("can you find out what is the best place to visit in France")