ric9176 committed
Commit 924d035 · Parent: 241f177
.langgraph_api/.langgraph_checkpoint.1.pckl CHANGED
Binary files a/.langgraph_api/.langgraph_checkpoint.1.pckl and b/.langgraph_api/.langgraph_checkpoint.1.pckl differ
 
.langgraph_api/.langgraph_checkpoint.2.pckl CHANGED
Binary files a/.langgraph_api/.langgraph_checkpoint.2.pckl and b/.langgraph_api/.langgraph_checkpoint.2.pckl differ
 
.langgraph_api/.langgraph_ops.pckl CHANGED
Binary files a/.langgraph_api/.langgraph_ops.pckl and b/.langgraph_api/.langgraph_ops.pckl differ
 
.langgraph_api/.langgraph_retry_counter.pckl CHANGED
Binary files a/.langgraph_api/.langgraph_retry_counter.pckl and b/.langgraph_api/.langgraph_retry_counter.pckl differ
 
.langgraph_api/store.pckl CHANGED
Binary files a/.langgraph_api/store.pckl and b/.langgraph_api/store.pckl differ
 
agent/graph.py CHANGED
@@ -9,7 +9,6 @@ from agent.utils.state import AgentState
 from agent.utils.nodes import (
     call_model,
     tool_node,
-    read_memory,
     write_memory,
     should_continue
 )
@@ -22,7 +21,6 @@ def create_graph_builder():
     # Add nodes
     builder.add_node("agent", call_model)
     builder.add_node("action", tool_node)
-    builder.add_node("read_memory", read_memory)
     builder.add_node("write_memory", write_memory)

     # Set entry point
@@ -37,7 +35,6 @@ def create_graph_builder():
         should_continue,
         {
             "action": "action",
-            "read_memory": "read_memory",
             "write_memory": "write_memory",
             END: END
         }
@@ -46,9 +43,6 @@ def create_graph_builder():
     # Connect action back to agent
     builder.add_edge("action", "agent")

-    # Memory operations should end after completion
-    builder.add_edge("read_memory", "agent")
-
     return builder

 def create_agent_graph_without_memory():
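For context, a hedged sketch of what create_graph_builder looks like once this commit drops the read_memory node. Only the lines visible in the diff above are certain; the StateGraph(AgentState) constructor, the set_entry_point("agent") call, and the END import from langgraph.graph are assumptions filled in from the surrounding context.

# Hedged reconstruction of create_graph_builder after this commit.
# Assumptions (not shown in the diff): StateGraph(AgentState) as the builder,
# "agent" as the entry point, END imported from langgraph.graph.
from langgraph.graph import StateGraph, END

from agent.utils.state import AgentState
from agent.utils.nodes import (
    call_model,
    tool_node,
    write_memory,
    should_continue
)


def create_graph_builder():
    builder = StateGraph(AgentState)  # assumed constructor

    # Add nodes (read_memory is gone after this commit)
    builder.add_node("agent", call_model)
    builder.add_node("action", tool_node)
    builder.add_node("write_memory", write_memory)

    # Set entry point (assumed to be the agent node)
    builder.set_entry_point("agent")

    # Conditional routing out of the agent node
    builder.add_conditional_edges(
        "agent",
        should_continue,
        {
            "action": "action",
            "write_memory": "write_memory",
            END: END
        }
    )

    # Connect action back to agent
    builder.add_edge("action", "agent")

    return builder

Since nothing else targeted read_memory, removing its add_node call, its routing entry, and the read_memory -> agent edge leaves no dangling references in the builder.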
agent/utils/nodes.py CHANGED
@@ -100,7 +100,7 @@ def update_memory(state: AgentState, config: RunnableConfig, store: BaseStore):
     store.put(namespace, "user_memory", {"memory": new_memory.content})
     return state

-def should_continue(state: AgentState) -> Literal["action", "read_memory", "write_memory", END]:
+def should_continue(state: AgentState) -> Literal["action", "write_memory", END]:
     """Determine the next node in the graph."""
     if not state["messages"]:
         return END
@@ -121,25 +121,9 @@ def should_continue(state: AgentState) -> Literal["action", "read_memory", "write_memory", END]:
     # Write memory for longer messages that might contain personal information
     if len(last_human_message.content.split()) > 3:
         return "write_memory"
-    # Read memory for short queries to ensure personalized responses
-    else:
-        return "read_memory"

     return END

-def read_memory(state: AgentState, config: RunnableConfig, store: BaseStore):
-    """Read and apply memory context without updating it."""
-    user_id = config["configurable"].get("session_id", "default")
-    namespace = ("memory", user_id)
-    existing_memory = store.get(namespace, "user_memory")
-
-    if existing_memory:
-        memory_content = existing_memory.value.get('memory')
-        # Add memory context to state for next model call
-        state["memory_context"] = memory_content
-
-    return state
-
 # Define the memory creation prompt
 MEMORY_CREATION_PROMPT = """"You are collecting information about the user to personalize your responses.

@@ -195,34 +179,6 @@ async def write_memory(state: AgentState, config: RunnableConfig, store: BaseStore):
 # Initialize tool node
 tool_node = ToolNode(tool_belt)

-def should_call_memory(state: AgentState) -> Literal["update_memory", "end"]:
-    """
-    Determine if we should update memory based on the conversation state.
-
-    Rules for updating memory:
-    1. Only update after human messages (not tool responses)
-    2. Update if the message might contain personal information
-    3. Don't update for simple queries or acknowledgments
-    """
-    if not state["messages"]:
-        return "end"
-
-    last_message = state["messages"][-1]
-
-    # Skip memory update for tool calls
-    if hasattr(last_message, "additional_kwargs") and last_message.additional_kwargs.get("tool_calls"):
-        return "agent"
-
-    # Skip memory update for very short messages (likely acknowledgments)
-    if isinstance(last_message, HumanMessage) and len(last_message.content.split()) <= 3:
-        return "agent"
-
-    # Update memory for human messages that might contain personal information
-    if isinstance(last_message, HumanMessage):
-        return "update_memory"
-
-    return "agent"
-
 # def route_message(state: MessagesState, config: RunnableConfig, store: BaseStore) -> Literal[END, "update_todos", "update_instructions", "update_profile"]:

 #     """Reflect on the memories and chat history to decide whether to update the memory collection."""
example.json → studio_example_message.json RENAMED
File without changes