# (removed stray paste artifacts: "Spaces:", "Runtime error" x2 — not part of the program)
""" | |
Simple Reasoning and Action Agent using LangGraph and LangChain | |
This agent follows a standard reasoning pattern: | |
1. Think - Analyze the input and determine an approach | |
2. Select - Choose appropriate tools from available options | |
3. Act - Use the selected tools | |
4. Observe - Review results | |
5. Conclude - Generate final response | |
""" | |
# Standard library
import os
from typing import Dict, List, Annotated, TypedDict, Union, Tuple, Any

# Third-party (LangChain / LangGraph)
# NOTE(review): `format_tool_to_openai_function` and `langchain_core.pydantic_v1`
# are deprecated in recent LangChain releases — confirm against the pinned version.
from langchain.tools.render import format_tool_to_openai_function
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode

# Project-local
from basic_tools import *
from utils import *
def get_available_tools():
    """Return the list of tools the agent may use.

    Tools are imported star-style from ``basic_tools``; this function is the
    single registry consulted by both the graph's ToolNode and the prompts.

    Returns:
        list: tool objects, each exposing ``.name`` and ``.description``.
    """
    # NOTE(fix): the original listed `multiply` twice; the duplicate would be
    # registered twice with the ToolNode and listed twice in prompts.
    return [
        multiply, add, subtract, divide, modulus,
        wiki_search, web_search, arxiv_search,
        python_repl, analyze_image,
        date_filter, analyze_content,
        step_by_step_reasoning, translate_text,
    ]
# Define the agent state
class AgentState(TypedDict):
    """State carried through the reasoning-and-action graph."""
    # Full conversation history (system, user, assistant, and tool messages)
    messages: List[Union[AIMessage, HumanMessage, SystemMessage, ToolMessage]]
    # Intermediate reasoning steps produced by the `think` node
    reasoning: List[str]
    # Names of the tools chosen by the `select_tools` node
    selected_tools: List[str]
    # Mapping of tool name -> raw tool output
    tool_results: Dict[str, Any]
# Shared LLM instance used by every node (provider resolved by utils.get_llm)
model = get_llm(provider="openai")

# System prompt for the agent; {tool_descriptions} is filled in by ReasoningAgent
AGENT_SYSTEM_PROMPT = """You are a helpful reasoning and action agent.
Your job is to:
1. Carefully analyze the user's request
2. Think through the problem step by step
3. Select appropriate tools from your toolkit
4. Use those tools to address the request
5. Provide a clear, complete response
Available tools:
{tool_descriptions}
When you need to use a tool, select the most appropriate one based on your reasoning.
Always show your reasoning process clearly.
"""
# ============= Node Functions =============
def think(state: AgentState) -> AgentState:
    """Analyze the most recent user request and record a reasoning step.

    Args:
        state: Current agent state; ``state["messages"]`` must be non-empty.

    Returns:
        The same state with the model's step-by-step analysis appended to
        ``state["reasoning"]``.
    """
    # Prefer the last message; if it is not from the user (e.g. a tool or AI
    # message), scan backwards for the most recent HumanMessage.
    user_message = state["messages"][-1]
    if not isinstance(user_message, HumanMessage):
        for msg in reversed(state["messages"]):
            if isinstance(msg, HumanMessage):
                user_message = msg
                break
    think_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(
            content="You are analyzing a user request. Think step by step about what the user is asking for and what approach would be best."),
        ("user", "{input}")
    ])
    think_response = model.invoke(
        think_prompt.format_messages(input=user_message.content)
    )
    # Append (never overwrite) so earlier reasoning steps are preserved
    state["reasoning"] = state.get("reasoning", []) + [think_response.content]
    return state
def select_tools(state: AgentState) -> AgentState:
    """Ask the model which tools to use, based on the latest reasoning step.

    Updates ``state["selected_tools"]`` with the validated tool names and,
    when any tools were chosen, appends a single AIMessage carrying one
    tool_call per selected tool so the downstream ToolNode can execute them.

    Args:
        state: Current agent state; ``state["reasoning"]`` must be non-empty.

    Returns:
        The updated state.
    """
    tools = get_available_tools()
    tool_descriptions = "\n".join(
        [f"- {tool.name}: {tool.description}" for tool in tools])
    select_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content=f"""Based on your analysis, select which tools would be most helpful for this task.
Available tools:
{tool_descriptions}
Return your selection as a comma-separated list of tool names, e.g., "calculator,web_search".
Only include tools that are actually needed for this specific request."""),
        ("user", "{reasoning}")
    ])
    select_response = model.invoke(
        select_prompt.format_messages(reasoning=state["reasoning"][-1])
    )
    # Parse the comma-separated reply and keep only names of real tools,
    # silently dropping anything the model hallucinated.
    requested = [name.strip() for name in select_response.content.split(',')]
    valid_tool_names = [tool.name for tool in tools]
    selected_tools = [name for name in requested if name in valid_tool_names]
    state["selected_tools"] = selected_tools
    if selected_tools:
        # NOTE(review): tool calls are emitted with empty args, so ToolNode
        # will invoke each tool without arguments — confirm the tools in
        # basic_tools tolerate that.
        tool_calls = [
            {"id": f"call_{i}", "name": tool_name, "args": {}}
            for i, tool_name in enumerate(selected_tools)
        ]
        state["messages"].append(AIMessage(content="", tool_calls=tool_calls))
    return state
# NOTE: legacy tool-execution node, superseded by langgraph.prebuilt.ToolNode
# (see create_agent_graph). Kept for reference; `ToolExecutor` it relies on
# has been removed from recent langgraph releases.
# def execute_tools(state: AgentState) -> AgentState:
#     """Execute the selected tools."""
#     # Get all available tools
#     all_tools = get_available_tools()
#     # Filter to only use selected tools
#     selected_tool_names = state["selected_tools"]
#     tools_to_use = [
#         tool for tool in all_tools if tool.name in selected_tool_names]
#     # Create tool executor
#     tool_executor = ToolExecutor(tools_to_use)
#     # Get the most recent reasoning
#     reasoning = state["reasoning"][-1]
#     # For each tool, generate a specific input and execute
#     tool_results = {}
#     for tool in tools_to_use:
#         # Create prompt for generating tool input
#         tool_input_prompt = ChatPromptTemplate.from_messages([
#             SystemMessage(content=f"""Generate a specific input for the following tool:
#             Tool: {tool.name}
#             Description: {tool.description}
#             The input should be formatted according to the tool's requirements and contain all necessary information.
#             Return only the exact input string that should be passed to the tool, nothing else."""),
#             ("user", "{reasoning}")
#         ])
#         # Generate specific input for this tool
#         tool_input_response = model.invoke(
#             tool_input_prompt.format_messages(reasoning=reasoning)
#         )
#         tool_input = tool_input_response.content.strip()
#         try:
#             # Execute the tool with the generated input
#             result = tool_executor.invoke({tool.name: tool_input})
#             tool_results[tool.name] = result[tool.name]
#             # Add tool message to conversation
#             state["messages"].append(
#                 ToolMessage(content=str(result[tool.name]), name=tool.name)
#             )
#         except Exception as e:
#             # Handle errors
#             tool_results[tool.name] = f"Error executing tool: {str(e)}"
#             state["messages"].append(
#                 ToolMessage(
#                     content=f"Error executing tool: {str(e)}", name=tool.name)
#             )
#     # Update state with tool results
#     state["tool_results"] = tool_results
#     return state
def generate_response(state: AgentState) -> AgentState:
    """Produce the final answer from the reasoning trace and tool outputs.

    Args:
        state: Current agent state; ``state["reasoning"]`` must be non-empty.

    Returns:
        The state with an AIMessage containing the model's concise answer
        appended to ``state["messages"]``.
    """
    # Flatten tool results into a readable "name: result" context block;
    # empty string when no tools ran (the no-tools graph path).
    tool_outputs = "\n".join([
        f"{tool_name}: {result}"
        for tool_name, result in state.get("tool_results", {}).items()
    ])
    response_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content="""Generate a helpful response to the user based on your reasoning and tool outputs. Give exact, to the point and concise one word or number as an answer.
No explanation is needed at all. Make sure that if numerical number is asked, you return only a number and nothing else. If you don't know the answer, make a guess from your training data, but don't return None. Return answer in only the language in which the question was asked."""),
        ("user",
         "User request: {user_request}\n\nReasoning: {reasoning}\n\nTool outputs: {tool_outputs}")
    ])
    # Recover the original user request (most recent human message)
    user_request = None
    for msg in reversed(state["messages"]):
        if isinstance(msg, HumanMessage):
            user_request = msg.content
            break
    response = model.invoke(
        response_prompt.format_messages(
            user_request=user_request,
            reasoning=state["reasoning"][-1],
            tool_outputs=tool_outputs
        )
    )
    state["messages"].append(AIMessage(content=response.content))
    return state
# ============= Graph Definition =============
def create_agent_graph():
    """Build and compile the think -> select -> execute -> respond graph.

    Returns:
        A compiled LangGraph runnable accepting/returning an ``AgentState``.
    """
    graph = StateGraph(AgentState)
    graph.add_node("think", think)
    graph.add_node("select_tools", select_tools)
    # Prebuilt ToolNode executes the tool_calls attached by select_tools
    graph.add_node("execute_tools", ToolNode(get_available_tools()))
    graph.add_node("generate_response", generate_response)

    def select_tools_next(state: AgentState):
        # Skip tool execution entirely when no tools were selected
        return "execute_tools" if state["selected_tools"] else "generate_response"

    graph.add_edge("think", "select_tools")
    graph.add_conditional_edges("select_tools", select_tools_next)
    graph.add_edge("execute_tools", "generate_response")
    graph.add_edge("generate_response", END)
    graph.set_entry_point("think")
    return graph.compile()
# ============= Agent Interface =============
class ReasoningAgent:
    """Conversational wrapper around the compiled reasoning graph.

    Maintains the message history across calls so the agent has context
    from earlier turns.
    """

    def __init__(self):
        self.graph = create_agent_graph()
        # Seed the conversation with a system prompt listing all tools
        tools = get_available_tools()
        tool_descriptions = "\n".join(
            [f"- {tool.name}: {tool.description}" for tool in tools])
        self.messages = [
            SystemMessage(content=AGENT_SYSTEM_PROMPT.format(
                tool_descriptions=tool_descriptions))
        ]

    def invoke(self, user_input: str) -> str:
        """Run one turn: record the user message, run the graph, return the answer.

        Args:
            user_input: The user's request as plain text.

        Returns:
            The content of the most recent AIMessage, or a fallback string
            if the graph produced none.
        """
        self.messages.append(HumanMessage(content=user_input))
        # Fresh per-turn scratch state; only messages persist between turns
        state = {"messages": self.messages, "reasoning": [],
                 "selected_tools": [], "tool_results": {}}
        result = self.graph.invoke(state)
        # Persist the updated history for subsequent turns
        self.messages = result["messages"]
        for msg in reversed(result["messages"]):
            if isinstance(msg, AIMessage):
                return msg.content
        # Fallback
        return "I encountered an issue processing your request."

    def __call__(self, *args, **kwargs):
        """Invoke the agent with user input."""
        return self.invoke(*args, **kwargs)
# Sample usage
if __name__ == "__main__":
    agent = ReasoningAgent()
    response = agent.invoke(
        "What's the weather in New York today and should I take an umbrella?")
    print(response)