Commit 719dfe3 by ric9176
Parent(s): c5eebcf

Add langgraph studio config and refactor

.langgraph_api/.langgraph_checkpoint.1.pckl ADDED (binary, 25.2 kB)
.langgraph_api/.langgraph_checkpoint.2.pckl ADDED (binary, 6.73 kB)
.langgraph_api/.langgraph_ops.pckl ADDED (binary, 7.94 kB)
.langgraph_api/.langgraph_retry_counter.pckl ADDED (binary, 83 Bytes)
.langgraph_api/store.pckl ADDED (binary, 6 Bytes)
.langgraph_api/store.vectors.pckl ADDED (binary, 6 Bytes)
 
agent/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from agent.agent import graph, graph_with_memory
+
+ __all__ = ["graph", "graph_with_memory"]

agent/agent.py ADDED
@@ -0,0 +1,50 @@
+ from langgraph.graph import StateGraph
+ from langgraph.checkpoint.memory import MemorySaver
+
+ from agent.utils.state import AgentState
+ from agent.utils.nodes import call_model, tool_node, should_continue
+
+ def create_agent_graph():
+     # Create the graph
+     builder = StateGraph(AgentState)
+
+     # Add nodes
+     builder.add_node("agent", call_model)
+     builder.add_node("action", tool_node)
+
+     # Add edges
+     builder.set_entry_point("agent")
+     builder.add_conditional_edges(
+         "agent",
+         should_continue,
+     )
+     builder.add_edge("action", "agent")
+
+     # Initialize memory saver for conversation persistence
+     memory = MemorySaver()
+
+     # Compile the graph with memory
+     return builder.compile(checkpointer=memory)
+
+ def create_agent_graph_without_memory():
+     # Create the graph
+     builder = StateGraph(AgentState)
+
+     # Add nodes
+     builder.add_node("agent", call_model)
+     builder.add_node("action", tool_node)
+
+     # Add edges
+     builder.set_entry_point("agent")
+     builder.add_conditional_edges(
+         "agent",
+         should_continue,
+     )
+     builder.add_edge("action", "agent")
+
+     # Compile the graph without memory
+     return builder.compile()
+
+ # Create both graph variants
+ graph_with_memory = create_agent_graph()
+ graph = create_agent_graph_without_memory()

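A minimal usage sketch for the checkpointed variant (assuming OPENAI_API_KEY and TAVILY_API_KEY are set and Qdrant is reachable; "demo-thread" is a placeholder id). The MemorySaver checkpointer keys state by thread_id, so reusing the same id resumes the same conversation:

from langchain_core.messages import HumanMessage
from agent import graph_with_memory

# MemorySaver keys checkpoints by thread_id; reusing the id resumes the thread.
config = {"configurable": {"thread_id": "demo-thread"}}  # placeholder id

result = graph_with_memory.invoke(
    {"messages": [HumanMessage(content="What's on in London this weekend?")], "context": []},
    config=config,
)
print(result["messages"][-1].content)
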
agent/utils/__init__.py ADDED
@@ -0,0 +1 @@
+

agent/utils/nodes.py ADDED
@@ -0,0 +1,35 @@
+ from langchain_openai import ChatOpenAI
+ from langchain_core.messages import SystemMessage
+ from langgraph.graph import END
+ from langgraph.prebuilt import ToolNode
+
+ from agent.utils.tools import tool_belt
+ from agent.utils.state import AgentState
+
+ # Initialize LLM
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+ model = llm.bind_tools(tool_belt)
+
+ # Define system prompt
+ SYSTEM_PROMPT = SystemMessage(content="""
+ You are a helpful AI assistant that answers questions clearly and concisely.
+ If you don't know something, simply say you don't know.
+ Be engaging and professional in your responses.
+ Use the retrieve_context tool when you need specific information about events and activities.
+ Use the tavily_search tool for general web searches.
+ """)
+
+ def call_model(state: AgentState):
+     messages = [SYSTEM_PROMPT] + state["messages"]
+     response = model.invoke(messages)
+     return {"messages": [response]}
+
+ # Initialize tool node
+ tool_node = ToolNode(tool_belt)
+
+ # Route to the tool node when the model requested tool calls, otherwise end
+ def should_continue(state):
+     last_message = state["messages"][-1]
+     if last_message.tool_calls:
+         return "action"
+     return END

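Because add_conditional_edges is called without an explicit path map, whatever should_continue returns is used directly as the destination: the string "action" must match the tool node's name, and END is LangGraph's terminal sentinel. A sketch of the routing decision (the tool-call payload is illustrative; importing nodes also initializes the model and tools, so the package's API keys must be available):

from langchain_core.messages import AIMessage
from langgraph.graph import END
from agent.utils.nodes import should_continue

# Last message carries a tool call: route to the "action" (ToolNode) node.
wants_tool = {"messages": [AIMessage(content="", tool_calls=[
    {"name": "retrieve_context", "args": {"query": "london events"}, "id": "call_1"},
])]}
assert should_continue(wants_tool) == "action"

# No tool calls: the run terminates.
plain_answer = {"messages": [AIMessage(content="All done.")]}
assert should_continue(plain_answer) == END
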
agent/utils/state.py ADDED
@@ -0,0 +1,6 @@
+ from typing import Annotated, TypedDict
+ from langgraph.graph.message import add_messages
+
+ class AgentState(TypedDict):
+     messages: Annotated[list, add_messages]
+     context: list  # Store retrieved context

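For reference, the add_messages annotation marks the channel's reducer: node updates are appended (and messages with matching ids merged) rather than overwriting the list. A quick standalone sketch of the reducer's behavior:

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph.message import add_messages

existing = [HumanMessage(content="hi", id="1")]
update = [AIMessage(content="hello!", id="2")]

# The reducer appends the update instead of replacing the list.
merged = add_messages(existing, update)
assert [m.id for m in merged] == ["1", "2"]
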
agent/utils/tools.py ADDED
@@ -0,0 +1,28 @@
+ from langchain_core.tools import tool
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from rag import create_rag_pipeline, add_urls_to_vectorstore
+
+ # Initialize RAG pipeline
+ rag_components = create_rag_pipeline(collection_name="london_events")
+
+ # Add some initial URLs to the vector store
+ urls = [
+     "https://www.timeout.com/london/things-to-do-in-london-this-weekend",
+     "https://www.timeout.com/london/london-events-in-march"
+ ]
+ add_urls_to_vectorstore(
+     rag_components["vector_store"],
+     rag_components["text_splitter"],
+     urls
+ )
+
+ @tool
+ def retrieve_context(query: str) -> list[str]:
+     """Searches the knowledge base for relevant information about events and activities. Use this when you need specific details about events."""
+     return [doc.page_content for doc in rag_components["retriever"].get_relevant_documents(query)]
+
+ # Initialize Tavily search tool
+ tavily_tool = TavilySearchResults(max_results=5)
+
+ # Create tool belt
+ tool_belt = [tavily_tool, retrieve_context]

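One caveat worth flagging: recent langchain-core releases deprecate retriever.get_relevant_documents in favor of the Runnable invoke interface. A drop-in rewrite of the tool under that assumption, reusing the rag_components defined above:

from langchain_core.tools import tool

@tool
def retrieve_context(query: str) -> list[str]:
    """Searches the knowledge base for relevant information about events and activities. Use this when you need specific details about events."""
    # retriever.invoke(...) is the Runnable-style replacement for the
    # deprecated get_relevant_documents(...) in recent langchain-core.
    return [doc.page_content for doc in rag_components["retriever"].invoke(query)]
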
app.py CHANGED
@@ -1,93 +1,9 @@
  import uuid
- from typing import Annotated, TypedDict, Literal
- from langchain_openai import ChatOpenAI
- from langgraph.graph import StateGraph, START, END
- from langgraph.graph.message import MessagesState, add_messages
- from langgraph.prebuilt import ToolNode
- from langgraph.checkpoint.memory import MemorySaver
-
- from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+ from langchain_core.messages import HumanMessage, AIMessage
  from langchain.schema.runnable.config import RunnableConfig
- from langchain_community.tools.tavily_search import TavilySearchResults
- from langchain.tools import Tool
- from langchain_core.tools import tool
-
  import chainlit as cl
- from rag import create_rag_pipeline, add_urls_to_vectorstore
-
- # Initialize RAG pipeline
- rag_components = create_rag_pipeline(collection_name="london_events")
-
- # Add some initial URLs to the vector store
- urls = [
-     "https://www.timeout.com/london/things-to-do-in-london-this-weekend",
-     "https://www.timeout.com/london/london-events-in-march"
- ]
- add_urls_to_vectorstore(
-     rag_components["vector_store"],
-     rag_components["text_splitter"],
-     urls
- )
-
- class AgentState(TypedDict):
-     messages: Annotated[list, add_messages]
-     context: list  # Store retrieved context
-
- # Create a retrieve tool
- @tool
- def retrieve_context(query: str) -> list[str]:
-     """Searches the knowledge base for relevant information about events and activities. Use this when you need specific details about events."""
-     return [doc.page_content for doc in rag_components["retriever"].get_relevant_documents(query)]
-
- tavily_tool = TavilySearchResults(max_results=5)
- tool_belt = [tavily_tool, retrieve_context]
-
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
- model = llm.bind_tools(tool_belt)
-
- # Define system prompt
- SYSTEM_PROMPT = SystemMessage(content="""
- You are a helpful AI assistant that answers questions clearly and concisely.
- If you don't know something, simply say you don't know.
- Be engaging and professional in your responses.
- Use the retrieve_context tool when you need specific information about events and activities.
- Use the tavily_search tool for general web searches.
- """)
-
- def call_model(state: AgentState):
-     messages = [SYSTEM_PROMPT] + state["messages"]
-     response = model.invoke(messages)
-     return {"messages": [response]}
-
- tool_node = ToolNode(tool_belt)
-
- # Simple flow control - always go to final
- def should_continue(state):
-     last_message = state["messages"][-1]
-     if last_message.tool_calls:
-         return "action"
-     return END
-
- # Create the graph
- builder = StateGraph(AgentState)
-
- # Remove retrieve node and modify graph structure
- builder.add_node("agent", call_model)
- builder.add_node("action", tool_node)
-
- # Update edges
- builder.set_entry_point("agent")
- builder.add_conditional_edges(
-     "agent",
-     should_continue,
- )
- builder.add_edge("action", "agent")
-
- # Initialize memory saver for conversation persistence
- memory = MemorySaver()
-
- # Compile the graph with memory
- graph = builder.compile(checkpointer=memory)
+ from agent import graph_with_memory as graph
+ from agent.utils.state import AgentState
 
  @cl.on_chat_start
  async def on_chat_start():
@@ -109,7 +25,7 @@ async def on_chat_start():
      # Initialize empty state with auth
      try:
          await graph.ainvoke(
-             {"messages": [], "context": []},
+             AgentState(messages=[], context=[]),
              config=config
          )
      except Exception as e:
@@ -159,7 +75,7 @@ async def on_message(message: cl.Message):
 
      # Stream the response
      async for chunk in graph.astream(
-         {"messages": current_messages, "context": []},
+         AgentState(messages=current_messages, context=[]),
          config=RunnableConfig(
              configurable={
                  "thread_id": session_id,

langgraph.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "dependencies": ["."],
+     "graphs": {
+         "agent": "agent:graph"
+     },
+     "env": ".env"
+ }

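The "agent": "agent:graph" entry points LangGraph Studio at the module path before the colon and the attribute after it. Targeting the checkpointer-free graph fits the usual convention, since the LangGraph API server layers its own persistence on top; graph_with_memory stays in use by the Chainlit app. A hand-rolled sketch of how that spec resolves (illustrative only; the real loading is done by langgraph-cli):

from importlib import import_module

spec = "agent:graph"
module_name, attr = spec.split(":")
graph = getattr(import_module(module_name), attr)  # re-exported by agent/__init__.py
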
rag.py CHANGED
@@ -1,6 +1,6 @@
- from typing import List, TypedDict
+ from typing import List
  from langchain_core.documents import Document
- from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+ from langchain_openai import OpenAIEmbeddings
  from langchain.text_splitter import RecursiveCharacterTextSplitter
  from langchain_qdrant import QdrantVectorStore
  from qdrant_client import QdrantClient

tools.py ADDED
@@ -0,0 +1,28 @@
+ from langchain_core.tools import tool
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from rag import create_rag_pipeline, add_urls_to_vectorstore
+
+ # Initialize RAG pipeline
+ rag_components = create_rag_pipeline(collection_name="london_events")
+
+ # Add some initial URLs to the vector store
+ urls = [
+     "https://www.timeout.com/london/things-to-do-in-london-this-weekend",
+     "https://www.timeout.com/london/london-events-in-march"
+ ]
+ add_urls_to_vectorstore(
+     rag_components["vector_store"],
+     rag_components["text_splitter"],
+     urls
+ )
+
+ @tool
+ def retrieve_context(query: str) -> list[str]:
+     """Searches the knowledge base for relevant information about events and activities. Use this when you need specific details about events."""
+     return [doc.page_content for doc in rag_components["retriever"].get_relevant_documents(query)]
+
+ # Initialize Tavily search tool
+ tavily_tool = TavilySearchResults(max_results=5)
+
+ # Create tool belt
+ tool_belt = [tavily_tool, retrieve_context]