"""
Simple Reasoning and Action Agent using LangGraph and LangChain

This agent follows a standard reasoning pattern:
1. Think - Analyze the input and determine an approach
2. Select - Choose appropriate tools from available options
3. Act - Use the selected tools
4. Observe - Review results
5. Conclude - Generate final response
"""

from typing import Dict, List, TypedDict, Union, Any

from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.prompts import ChatPromptTemplate

from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode


from basic_tools import *
from utils import *
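# The wildcard imports above are expected to supply the tool functions used in
# get_available_tools (multiply, add, wiki_search, ...) and the get_llm helper.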

def get_available_tools():
    """Return the list of tools the agent can choose from (defined in basic_tools)."""
    tools = [
        multiply, add, subtract, divide, modulus,
        wiki_search, web_search, arxiv_search,
        python_repl, analyze_image,
        date_filter, analyze_content,
        step_by_step_reasoning, translate_text,
    ]
    return tools


# Define the agent state
class AgentState(TypedDict):
    """State for the reasoning and action agent."""
    messages: List[Union[AIMessage, HumanMessage, SystemMessage, ToolMessage]]
    # We'll store intermediate steps of reasoning here
    reasoning: List[str]
    # Keep track of selected tools
    selected_tools: List[str]
    # Store tool results
    tool_results: Dict[str, Any]


# Initialize the chat model through the project's get_llm helper.
model = get_llm(provider="openai")

# System prompts
AGENT_SYSTEM_PROMPT = """You are a helpful reasoning and action agent.
Your job is to:
1. Carefully analyze the user's request
2. Think through the problem step by step
3. Select appropriate tools from your toolkit
4. Use those tools to address the request
5. Provide a clear, complete response

Available tools:
{tool_descriptions}

When you need to use a tool, select the most appropriate one based on your reasoning.
Always show your reasoning process clearly.
"""


# ============= Node Functions =============

def think(state: AgentState) -> AgentState:
    """Think through the problem and analyze the user request."""

    # Extract the user's most recent message
    user_message = state["messages"][-1]
    if not isinstance(user_message, HumanMessage):
        # If the last message isn't from the user, find the most recent one
        for msg in reversed(state["messages"]):
            if isinstance(msg, HumanMessage):
                user_message = msg
                break

    # Create a prompt for thinking
    think_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(
            content="You are analyzing a user request. Think step by step about what the user is asking for and what approach would be best."),
        ("user", "{input}")
    ])

    # Generate thinking output
    think_response = model.invoke(
        think_prompt.format_messages(input=user_message.content)
    )

    # Update state with reasoning
    reasoning = think_response.content
    state["reasoning"] = state.get("reasoning", []) + [reasoning]

    return state


def select_tools(state: AgentState) -> AgentState:
    """Select appropriate tools based on the reasoning."""

    # Get available tools
    tools = get_available_tools()
    tool_descriptions = "\n".join(
        [f"- {tool.name}: {tool.description}" for tool in tools])

    # Create a prompt for tool selection
    select_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content=f"""Based on your analysis, select which tools would be most helpful for this task.
Available tools:
{tool_descriptions}

Return your selection as a comma-separated list of tool names, e.g., "multiply,web_search".
Only include tools that are actually needed for this specific request."""),
        ("user", "{reasoning}")
    ])

    # Generate tool selection output
    select_response = model.invoke(
        select_prompt.format_messages(reasoning=state["reasoning"][-1])
    )

    # Parse the selected tools
    selected_tools = [
        tool_name.strip()
        for tool_name in select_response.content.split(',')
    ]

    # Filter to ensure only valid tools are selected
    valid_tool_names = [tool.name for tool in tools]
    selected_tools = [
        tool for tool in selected_tools if tool in valid_tool_names]

    # Update state with selected tools
    state["selected_tools"] = selected_tools

    # Add a single AIMessage carrying one tool_call per selected tool. Arguments
    # are left empty here; the prebuilt ToolNode executes each call and, for
    # tools that require arguments, typically reports the failure back as a
    # ToolMessage rather than raising.
    if selected_tools:
        tool_calls = [
            {"id": f"call_{i}", "name": tool_name, "args": {}}
            for i, tool_name in enumerate(selected_tools)
        ]
        state["messages"].append(
            AIMessage(
                content="",
                tool_calls=tool_calls
            )
        )

    return state


# Legacy manual tool-execution step (uses the deprecated ToolExecutor). Kept
# commented out for reference; it has been replaced by the prebuilt ToolNode
# used in create_agent_graph below.
# def execute_tools(state: AgentState) -> AgentState:
#     """Execute the selected tools."""

#     # Get all available tools
#     all_tools = get_available_tools()

#     # Filter to only use selected tools
#     selected_tool_names = state["selected_tools"]
#     tools_to_use = [
#         tool for tool in all_tools if tool.name in selected_tool_names]

#     # Create tool executor
#     tool_executor = ToolExecutor(tools_to_use)

#     # Get the most recent reasoning
#     reasoning = state["reasoning"][-1]

#     # For each tool, generate a specific input and execute
#     tool_results = {}
#     for tool in tools_to_use:
#         # Create prompt for generating tool input
#         tool_input_prompt = ChatPromptTemplate.from_messages([
#             SystemMessage(content=f"""Generate a specific input for the following tool:
# Tool: {tool.name}
# Description: {tool.description}

# The input should be formatted according to the tool's requirements and contain all necessary information.
# Return only the exact input string that should be passed to the tool, nothing else."""),
#             ("user", "{reasoning}")
#         ])

#         # Generate specific input for this tool
#         tool_input_response = model.invoke(
#             tool_input_prompt.format_messages(reasoning=reasoning)
#         )
#         tool_input = tool_input_response.content.strip()

#         try:
#             # Execute the tool with the generated input
#             result = tool_executor.invoke({tool.name: tool_input})
#             tool_results[tool.name] = result[tool.name]

#             # Add tool message to conversation
#             state["messages"].append(
#                 ToolMessage(content=str(result[tool.name]), name=tool.name)
#             )
#         except Exception as e:
#             # Handle errors
#             tool_results[tool.name] = f"Error executing tool: {str(e)}"
#             state["messages"].append(
#                 ToolMessage(
#                     content=f"Error executing tool: {str(e)}", name=tool.name)
#             )

#     # Update state with tool results
#     state["tool_results"] = tool_results

#     return state


def generate_response(state: AgentState) -> AgentState:
    """Generate a final response based on reasoning and tool outputs."""

    # Prepare the context for response generation
    tool_outputs = "\n".join([
        f"{tool_name}: {result}"
        for tool_name, result in state.get("tool_results", {}).items()
    ])

    # Create prompt for response generation
    response_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content="""Generate a helpful response to the user based on your reasoning and tool outputs.
Be exact and concise: answer with a single word or number whenever possible, with no explanation.
If a number is asked for, return only the number and nothing else.
If you don't know the answer, make a best guess from your training data; never return None.
Answer in the same language the question was asked in."""),
        ("user",
         "User request: {user_request}\n\nReasoning: {reasoning}\n\nTool outputs: {tool_outputs}")
    ])

    # Get the most recent user request
    user_request = None
    for msg in reversed(state["messages"]):
        if isinstance(msg, HumanMessage):
            user_request = msg.content
            break

    # Generate final response
    response = model.invoke(
        response_prompt.format_messages(
            user_request=user_request,
            reasoning=state["reasoning"][-1],
            tool_outputs=tool_outputs
        )
    )

    # Add the AI response to messages
    state["messages"].append(AIMessage(content=response.content))

    return state


# ============= Graph Definition =============

def create_agent_graph():
    """Create and compile the agent graph.

    Flow: think -> select_tools -> execute_tools -> generate_response -> END,
    with execute_tools skipped when no tools are selected.
    """

    graph = StateGraph(AgentState)

    graph.add_node("think", think)
    graph.add_node("select_tools", select_tools)

    tools = get_available_tools()
    tool_node = ToolNode(tools)
    graph.add_node("execute_tools", tool_node)

    graph.add_node("generate_response", generate_response)

    # Conditional edge: if no tools, skip execute_tools
    def select_tools_next(state: AgentState):
        if state["selected_tools"]:
            return "execute_tools"
        else:
            return "generate_response"

    graph.add_edge("think", "select_tools")
    graph.add_conditional_edges("select_tools", select_tools_next)
    graph.add_edge("execute_tools", "generate_response")
    graph.add_edge("generate_response", END)

    graph.set_entry_point("think")
    return graph.compile()


# ============= Agent Interface =============

class ReasoningAgent:
    """Reasoning and action agent main class."""

    def __init__(self):
        self.graph = create_agent_graph()
        # Initialize with system prompt
        tools = get_available_tools()
        tool_descriptions = "\n".join(
            [f"- {tool.name}: {tool.description}" for tool in tools])
        self.messages = [
            SystemMessage(content=AGENT_SYSTEM_PROMPT.format(
                tool_descriptions=tool_descriptions))
        ]

    def invoke(self, user_input: str) -> str:
        """Process user input and return response."""
        # Add user message to history
        self.messages.append(HumanMessage(content=user_input))

        # Initialize state
        state = {"messages": self.messages, "reasoning": [],
                 "selected_tools": [], "tool_results": {}}

        # Run the graph
        result = self.graph.invoke(state)

        # Update messages
        self.messages = result["messages"]

        # Return the last AI message
        for msg in reversed(result["messages"]):
            if isinstance(msg, AIMessage):
                return msg.content

        # Fallback
        return "I encountered an issue processing your request."
    
    def __call__(self, *args, **kwargs):
        """Invoke the agent with user input."""
        return self.invoke(*args, **kwargs)


# Sample usage
if __name__ == "__main__":
    agent = ReasoningAgent()
    response = agent.invoke(
        "What's the weather in New York today and should I take an umbrella?")
    print(response)
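
    # A second, optional example that exercises the math tools. It assumes an
    # OPENAI_API_KEY and the basic_tools dependencies are configured, and uses
    # the __call__ shorthand for invoke(). Shown commented out as a sketch:
    # print(agent("What is 144 divided by 12?"))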