Nikolay Angelov committed on
Commit
a666cd9
·
1 Parent(s): 224b69c

fix the app

Browse files
Files changed (9) hide show
  1. Gradio_UI.py +92 -0
  2. README.md +83 -74
  3. UI.py +0 -278
  4. app.py +17 -37
  5. main.py +5 -14
  6. requirements.txt +1 -1
  7. tools/__init__.py +4 -0
  8. tools/final_answer.py +0 -14
  9. tools/time_tools.py +13 -0
Gradio_UI.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from typing import Optional
import os


class GradioUI:
    """A Gradio interface that serves as a frontend for the agent API."""

    def __init__(self, agent=None, file_upload_folder: Optional[str] = None):
        """
        Args:
            agent: A LangChain-style runnable exposing ``invoke({"input": ...})``
                and returning a dict with an ``"output"`` key.
            file_upload_folder: Optional directory for uploaded files; created
                on demand if it does not exist.
        """
        self.agent = agent
        self.file_upload_folder = file_upload_folder

        if self.file_upload_folder is not None:
            # exist_ok=True makes a separate existence pre-check unnecessary
            # and avoids a check-then-create race.
            os.makedirs(self.file_upload_folder, exist_ok=True)

    def create_interface(self):
        """Build and return the Gradio Blocks app (menu strip + chat console)."""
        with gr.Blocks(css="""
            #main-container {
                display: flex;
                height: 100%;
            }
            #menu-column {
                min-height: 600px;
                border-right: 1px solid #ddd;
            }
            #chat-column {
                min-height: 600px;
                flex-grow: 1;
            }
            .button-container {
                padding: 10px;
            }
        """) as interface:
            with gr.Row(elem_id="main-container"):
                # Left menu strip (1/4 width)
                with gr.Column(scale=1, elem_id="menu-column"):
                    gr.Markdown("# Menu")
                    with gr.Group(elem_classes="button-container"):
                        new_chat_btn = gr.Button("New Chat", variant="primary")

                    # Output area for button actions
                    menu_output = gr.HTML(label="Action Result")

                # Right chat console (3/4 width)
                with gr.Column(scale=3, elem_id="chat-column"):
                    gr.Markdown("# AI Assistant")
                    chatbot = gr.Chatbot(
                        height=500,
                        type='messages'
                    )
                    msg = gr.Textbox(
                        placeholder="Ask me anything...",
                        show_label=False,
                        container=False
                    )
                    clear = gr.Button("Clear")

            def handle_new_chat():
                # Clears the chatbot and reports the action in the menu pane.
                return None, "Started new conversation"

            def user_input(message, history):
                # Invoke the agent and append both turns in 'messages' format;
                # agent failures are surfaced in-chat rather than crashing the UI.
                try:
                    response = self.agent.invoke({
                        "input": message
                    })
                    history.append({"role": "user", "content": message})
                    history.append({"role": "assistant", "content": response["output"]})
                    return "", history
                except Exception as e:
                    history.append({"role": "user", "content": message})
                    history.append({"role": "assistant", "content": f"Error: {str(e)}"})
                    return "", history

            # Set up event handlers
            msg.submit(user_input, [msg, chatbot], [msg, chatbot])
            clear.click(lambda: None, None, chatbot, queue=False)
            new_chat_btn.click(
                fn=handle_new_chat,
                inputs=[],
                outputs=[chatbot, menu_output]
            )

        return interface

    def launch(self, **kwargs):
        """Launch the Gradio interface standalone (for development)"""
        interface = self.create_interface()
        interface.launch(**kwargs)


__all__ = ["GradioUI"]
README.md CHANGED
@@ -17,109 +17,118 @@ tags:
17
  - coaching
18
  ---
19
 
20
- # Career Coach Agent 🤖
21
 
22
- An AI-powered career coaching assistant built with FastAPI, Gradio UI, LangChain, and SmolaGents. This application provides an interactive interface for career guidance through both a REST API and a web-based UI.
23
 
24
- ## 🚀 Features
25
 
26
- - **Unified Interface**: Combined FastAPI and Gradio UI on a single port (7860)
27
- - **AI-Powered Responses**: Utilizing Mixtral-8x7B-Instruct-v0.1 model
28
- - **Interactive Chat Interface**: Real-time conversation with the AI agent
29
- - **Multi-tool Integration**: Including webpage visits and time zone conversions
30
- - **ReAct Agent Pattern**: Step-by-step reasoning and tool usage
31
 
32
- ## 🛠️ Technical Stack
33
 
34
- - **Backend Framework**: FastAPI (mounted with Gradio)
35
- - **UI Framework**: Gradio with SmolaGents
36
- - **AI Framework**:
37
- - LangChain ReAct Agent (Backend) - For structured reasoning and tool usage
38
- - SmolaGents (UI) - For enhanced agent interactions and chat interface
39
- - **ML Models**: Hugging Face (Mixtral-8x7B-Instruct-v0.1)
40
- - **Additional Key Libraries**:
41
- - `uvicorn`: ASGI server
42
- - `markdownify`: Web content processing
43
- - `langchain`: AI framework and tools
44
- - `smolagents`: UI agent framework
45
 
46
- ## 📋 Prerequisites
47
 
48
- - Python 3.8+
49
- - Hugging Face account (for model access)
 
 
 
50
 
51
- ## ⚙️ Installation
52
 
53
- 1. Clone the repository:
54
  ```bash
55
- git clone https://github.com/nangelov/career-coach-agent.git
56
- cd career-coach-agent
57
  ```
58
 
59
- 2. Create and activate a virtual environment:
60
- ```bash
61
- python -m venv venv
62
- source venv/bin/activate # On Windows: venv\Scripts\activate
63
- ```
64
 
65
- 3. Install dependencies:
66
  ```bash
67
- pip install -r requirements.txt
 
68
  ```
69
 
70
- 4. (Required) Set up Hugging Face token:
71
  ```bash
72
- export HUGGINGFACEHUB_API_TOKEN=your_token_here
73
  ```
74
 
75
- 5. Run the application:
 
 
 
 
76
  ```bash
77
- python main.py
78
  ```
79
 
80
  The application will be available at:
81
- - Main UI: http://localhost:7860
82
- - API Documentation: http://localhost:7860/docs/
83
-
84
- ## 🌐 Hugging Face Spaces Deployment
85
-
86
- This application is specifically designed to work with Hugging Face Spaces:
87
- - Uses a single port (7860) as required by Spaces
88
- - Combines FastAPI and Gradio on the same port
89
- - API documentation is accessible at `/docs/` on the same port
90
- - All functionality works within Spaces' constraints
91
-
92
- ## 📚 API Documentation
93
-
94
- The API documentation is available at `/docs/` on the same port as the main application (7860). This unified setup ensures compatibility with Hugging Face Spaces while maintaining all functionality.
95
-
96
- ## 🔑 Key Endpoints
 
97
 
98
- All endpoints are available on port 7860:
99
- - `/`: Main Gradio UI
100
- - `/docs/`: API Documentation
101
- - `/agent/query`: Send queries to the AI agent
102
 
103
- ## 🔍 How It Works
 
 
 
 
 
104
 
105
- The application uses a ReAct (Reasoning and Acting) agent pattern, which follows this structure:
106
- 1. **Thought**: The agent reasons about what to do
107
- 2. **Action**: The agent decides which tool to use
108
- 3. **Observation**: The tool returns a result
109
- 4. **Thought**: The agent reasons about the observation
110
- 5. **Action**: The agent either uses another tool or provides a final answer
 
111
 
112
- ## ⚠️ Important Notes
113
 
114
- - The application requires active internet connection for AI model access
115
- - Hugging Face API token is required for model access
116
- - All services run on port 7860 to comply with Hugging Face Spaces requirements
117
- - The UI and API are served from the same port for better integration
 
118
 
119
- ## 🤝 Contributing
120
 
121
- Contributions are welcome! Please feel free to submit a Pull Request.
 
 
 
 
122
 
123
- ## 📄 License
124
 
125
- This project is licensed under the MIT License - see the LICENSE file for details.
 
17
  - coaching
18
  ---
19
 
20
+ # Career Coach AI Assistant
21
 
22
+ A FastAPI and Gradio-based AI assistant that helps users with career development, powered by Mixtral-8x7B-Instruct and LangChain.
23
 
24
+ ## Features
25
 
26
+ - Interactive chat interface built with Gradio
27
+ - Career-focused AI assistant using Mixtral-8x7B-Instruct model
28
+ - FastAPI backend with RESTful endpoints
29
+ - LangChain integration for advanced prompt handling and tool usage
30
+ - System prompts and templates managed through YAML configuration
31
 
32
+ ## Project Structure
33
 
34
+ ```
35
+ career-coach-agent/
36
+ ├── app.py # Main FastAPI application with agent setup
37
+ ├── Gradio_UI.py # Gradio interface implementation
38
+ ├── prompts.yaml # System prompts and templates
39
+ ├── main.py # Application entry point
40
+ └── README.md # This file
41
+ ```
 
 
 
42
 
43
+ ## Requirements
44
 
45
+ - Python 3.12+
46
+ - FastAPI
47
+ - Gradio
48
+ - LangChain
49
+ - Hugging Face Hub API token
50
 
51
+ ## Environment Variables
52
 
53
+ Required environment variables:
54
  ```bash
55
+ HUGGINGFACEHUB_API_TOKEN=your_huggingface_token
 
56
  ```
57
 
58
+ ## Installation
 
 
 
 
59
 
60
+ 1. Clone the repository:
61
  ```bash
62
+ git clone <repository-url>
63
+ cd career-coach-agent
64
  ```
65
 
66
+ 2. Install dependencies:
67
  ```bash
68
+ pip install -r requirements.txt
69
  ```
70
 
71
+ 3. Set up your Hugging Face API token as an environment variable.
72
+
73
+ ## Running the Application
74
+
75
+ Start the application:
76
  ```bash
77
+ uvicorn main:app --reload
78
  ```
79
 
80
  The application will be available at:
81
+ - Web Interface: http://localhost:8000
82
+ - API Documentation: http://localhost:8000/docs
83
+ - Alternative API Documentation: http://localhost:8000/redoc
84
+
85
+ ## API Endpoints
86
+
87
+ ### `/agent/query` (POST)
88
+ Submit queries to the AI assistant.
89
+
90
+ Request body:
91
+ ```json
92
+ {
93
+ "query": "string",
94
+ "thread_id": "string (optional)",
95
+ "context": {} (optional)
96
+ }
97
+ ```
98
 
99
+ ## Features
 
 
 
100
 
101
+ ### AI Assistant
102
+ - Career planning and goal setting
103
+ - Professional development advice
104
+ - Job search strategies
105
+ - Skill development recommendations
106
+ - Industry insights and trends
107
 
108
+ ### Technical Features
109
+ - Real-time chat interface
110
+ - Timezone-aware responses
111
+ - Session management
112
+ - Error handling
113
+ - CORS support
114
+ - Interactive documentation
115
 
116
+ ## Architecture
117
 
118
+ - **FastAPI**: Handles HTTP requests and API endpoints
119
+ - **Gradio**: Provides the web interface
120
+ - **LangChain**: Manages the AI agent and tools
121
+ - **Mixtral-8x7B-Instruct**: Powers the AI responses
122
+ - **YAML Configuration**: Manages system prompts and templates
123
 
124
+ ## Contributing
125
 
126
+ 1. Fork the repository
127
+ 2. Create a feature branch
128
+ 3. Commit your changes
129
+ 4. Push to the branch
130
+ 5. Create a Pull Request
131
 
132
+ ## License
133
 
134
+ [Add your license here]
UI.py DELETED
@@ -1,278 +0,0 @@
1
- import os
2
- import re
3
- from typing import Optional, List, Dict, Any, Callable
4
-
5
- from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
6
- from smolagents.agents import ActionStep, MultiStepAgent
7
- from smolagents.memory import MemoryStep
8
- from smolagents.utils import _is_package_available
9
- import gradio as gr
10
- import requests
11
-
12
-
13
def pull_messages_from_step(
    step_log: MemoryStep,
):
    """Extract ChatMessage objects from agent steps with proper nesting.

    Yields ``gr.ChatMessage`` objects for: the step header, the model's
    reasoning text, the first tool call (with execution logs and errors
    nested under it), and a footnote with token/duration statistics.
    """
    if isinstance(step_log, ActionStep):
        # Output the step number
        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")

        # First yield the thought/reasoning from the LLM
        if hasattr(step_log, "model_output") and step_log.model_output is not None:
            # Clean up the LLM output: strip stray <end_code> markers in the
            # several layouts the model emits them in.
            model_output = step_log.model_output.strip()
            model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
            model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
            model_output = model_output.strip()
            yield gr.ChatMessage(role="assistant", content=model_output)

        # For tool calls, create a parent message
        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
            first_tool_call = step_log.tool_calls[0]
            used_code = first_tool_call.name == "python_interpreter"
            parent_id = f"call_{len(step_log.tool_calls)}"

            # Tool call becomes the parent message; normalize the arguments
            # to a display string regardless of their original type.
            args = first_tool_call.arguments
            if isinstance(args, dict):
                content = str(args.get("answer", str(args)))
            else:
                content = str(args).strip()

            if used_code:
                # Clean up the content by removing any end code tags
                content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
                content = content.strip()
                if not content.startswith("```python"):
                    content = f"```python\n{content}\n```"

            parent_message_tool = gr.ChatMessage(
                role="assistant",
                content=content,
                metadata={
                    "title": f"🛠️ Used tool {first_tool_call.name}",
                    "id": parent_id,
                    "status": "pending",
                },
            )
            yield parent_message_tool

            # Nesting execution logs under the tool call if they exist
            if hasattr(step_log, "observations") and (
                step_log.observations is not None and step_log.observations.strip()
            ):  # Only yield execution logs if there's actual content
                log_content = step_log.observations.strip()
                if log_content:
                    log_content = re.sub(r"^Execution logs:\s*", "", log_content)
                    yield gr.ChatMessage(
                        role="assistant",
                        content=f"{log_content}",
                        metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
                    )

            # Nesting any errors under the tool call
            if hasattr(step_log, "error") and step_log.error is not None:
                yield gr.ChatMessage(
                    role="assistant",
                    content=str(step_log.error),
                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
                )

            # Update parent message metadata to done status without yielding a new message
            parent_message_tool.metadata["status"] = "done"

        # Handle standalone errors but not from tool calls
        elif hasattr(step_log, "error") and step_log.error is not None:
            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})

        # Calculate duration and token information
        step_footnote = f"{step_number}"
        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
            step_footnote += (
                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
            )
        # BUG FIX: the original assigned None when step_log.duration was falsy
        # and then did `step_footnote += step_duration`, raising
        # "TypeError: can only concatenate str ... NoneType". Only append
        # the duration fragment when a truthy duration is present.
        if hasattr(step_log, "duration") and step_log.duration:
            step_footnote += f" | Duration: {round(float(step_log.duration), 2)}"
        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
        yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
        yield gr.ChatMessage(role="assistant", content="-----")
107
-
108
-
109
def stream_to_gradio(
    agent,
    task: str,
    reset_agent_memory: bool = False,
    additional_args: Optional[dict] = None,
):
    """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
    if not _is_package_available("gradio"):
        raise ModuleNotFoundError(
            "Please install 'gradio' extra to use the AgentUI: `pip install 'smolagents[gradio]'`"
        )

    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
        # Attach per-step token counts when the model reports them.
        # (The original also kept running totals across steps, but those
        # accumulators were never read — dead code, removed.)
        if hasattr(agent.model, "last_input_token_count") and isinstance(step_log, ActionStep):
            step_log.input_token_count = agent.model.last_input_token_count
            step_log.output_token_count = agent.model.last_output_token_count

        for message in pull_messages_from_step(
            step_log,
        ):
            yield message

    final_answer = step_log  # Last log is the run's final_answer
    final_answer = handle_agent_output_types(final_answer)

    # Render the final answer according to its media type.
    if isinstance(final_answer, AgentText):
        yield gr.ChatMessage(
            role="assistant",
            content=f"**Final answer:**\n{final_answer.to_string()}\n",
        )
    elif isinstance(final_answer, AgentImage):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "image/png"},
        )
    elif isinstance(final_answer, AgentAudio):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
        )
    else:
        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
158
-
159
-
160
class AgentUI:
    """A Gradio interface that serves as a frontend for the agent API."""

    def __init__(self, agent=None, file_upload_folder: str = None, api_url: str = "http://localhost:8080"):
        """
        Args:
            agent: Optional agent object (queries are sent over HTTP, not invoked directly).
            file_upload_folder: Optional directory for uploaded files; created if missing.
            api_url: Base URL of the FastAPI backend exposing ``/agent/query``.
        """
        self.agent = agent
        self.file_upload_folder = file_upload_folder
        self.api_url = api_url
        # Conversation id assigned by the server; populated after the first reply.
        self.thread_id = None

        if self.file_upload_folder is not None:
            if not os.path.exists(file_upload_folder):
                os.makedirs(file_upload_folder, exist_ok=True)

        self.chat_history = []

    def query_agent(self, message: str, history: List[List[str]]):
        """Send a query to the agent API; return (cleared_textbox, updated_history)."""
        if not message:
            return "", history

        try:
            # Payload shape must match the backend's QueryRequest model.
            payload = {
                "query": message,
                "thread_id": self.thread_id,
                "context": {}
            }

            response = requests.post(
                f"{self.api_url}/agent/query",
                json=payload
            )

            if response.status_code == 200:
                data = response.json()
                # Remember the server-assigned thread for follow-up turns.
                self.thread_id = data.get("thread_id")
                return "", history + [[message, data["response"]]]
            else:
                return "", history + [[message, f"Error: {response.status_code}"]]
        except Exception as e:
            # Network/JSON errors are surfaced in-chat instead of crashing the UI.
            return "", history + [[message, f"Error: {str(e)}"]]

    def reset_conversation(self):
        """Reset the conversation state"""
        self.thread_id = None
        self.chat_history = []
        return "Started new conversation"

    def get_gradio_app(self):
        """Get the Gradio app for mounting in FastAPI"""
        # FIX: the original computed `api_port = int(self.api_url.split(":")[-1])`
        # here but never used it; the dead local has been removed.
        with gr.Blocks(css="""
            #main-container {
                display: flex;
                height: 100%;
            }
            #menu-column {
                min-height: 600px;
                border-right: 1px solid #ddd;
            }
            #chat-column {
                min-height: 600px;
            }
            .button-container {
                padding: 10px;
            }
            .gr-button.green-btn {
                background-color: #22c55e !important;
                color: white !important;
                border: none !important;
                font-weight: bold;
            }
        """) as interface:
            with gr.Row(elem_id="main-container"):
                # Left menu strip (1/4 width)
                with gr.Column(scale=1, elem_id="menu-column"):
                    gr.Markdown("# Menu")
                    with gr.Group(elem_classes="button-container"):
                        docs_btn = gr.Button("API Documentation", variant="secondary", elem_classes=["green-btn"])
                        new_chat_btn = gr.Button("New Chat", variant="primary")

                    # Output area for button actions
                    menu_output = gr.HTML(label="Action Result")

                # Right chat console (3/4 width)
                with gr.Column(scale=3, elem_id="chat-column"):
                    gr.Markdown("# AI Assistant")
                    chatbot = gr.Chatbot(height=500)
                    msg = gr.Textbox(
                        placeholder="Ask me anything...",
                        show_label=False,
                        container=False
                    )
                    clear = gr.Button("Clear")

            # Set up event handlers
            msg.submit(self.query_agent, [msg, chatbot], [msg, chatbot])
            clear.click(lambda: ([], None), outputs=[chatbot, menu_output])
            new_chat_btn.click(
                fn=lambda: ([], self.reset_conversation()),
                inputs=[],
                outputs=[chatbot, menu_output]
            )
            docs_btn.click(
                fn=lambda: f"<script>window.open('/docs', '_blank')</script>",
                inputs=[],
                outputs=[menu_output]
            )

        return interface

    def launch(self, server_name: str = "0.0.0.0", server_port: int = 7860, **kwargs):
        """Launch the Gradio interface standalone (for development)"""
        interface = self.get_gradio_app()
        interface.launch(server_name=server_name, server_port=server_port, **kwargs)
277
-
278
- __all__ = ["stream_to_gradio", "AgentUI"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -4,22 +4,19 @@ from langchain.agents import AgentExecutor, create_react_agent
4
  from langchain_core.prompts import PromptTemplate
5
 
6
  from tools.visit_webpage import visit_webpage
7
- from tools.final_answer import final_answer
8
 
9
- import gradio as gr
10
- import datetime
11
- import pytz
12
  import os
13
  import yaml
14
- import json
15
  import uuid
16
- from typing import Optional, Type, Dict, Any, List
17
  from pydantic import BaseModel, Field
18
 
19
  from fastapi import FastAPI, HTTPException
20
  from fastapi.middleware.cors import CORSMiddleware
21
- from fastapi.responses import HTMLResponse, RedirectResponse
22
- import requests
 
23
 
24
  # Initialize FastAPI app
25
  app = FastAPI(title="AI Assistant", description="AI Assistant with LangChain and Gradio")
@@ -48,30 +45,22 @@ llm = HuggingFaceEndpoint(
48
  do_sample=True,
49
  return_full_text=False,
50
  model_kwargs={
51
- "stop": ["Human:", "Assistant:", "Observation:"] # Reduced to 3 stop sequences
52
  }
53
  )
54
 
55
- # Define tools
56
- @tool
57
- def get_current_time(timezone: str = "UTC") -> str:
58
- """Get the current time in the specified timezone."""
59
- try:
60
- tz = pytz.timezone(timezone)
61
- current_time = datetime.datetime.now(tz)
62
- return current_time.strftime("%Y-%m-%d %H:%M:%S %Z")
63
- except Exception as e:
64
- return f"Error: {str(e)}"
65
-
66
  # Load system prompt and template
67
  with open("prompts.yaml", 'r') as stream:
68
  prompt_templates = yaml.safe_load(stream)
69
 
70
- # Create the ReAct prompt template
71
- prompt = PromptTemplate.from_template(prompt_templates["template"])
 
 
 
72
 
73
  # Create the agent
74
- tools = [get_current_time]
75
  agent = create_react_agent(
76
  llm=llm,
77
  tools=tools,
@@ -91,26 +80,13 @@ class QueryRequest(BaseModel):
91
  context: Dict[str, Any] = Field(default_factory=dict)
92
 
93
  # API Routes
94
- @app.get("/")
95
- async def root():
96
- return HTMLResponse("<h2>Welcome! Please use the Gradio UI above.</h2>")
97
-
98
- @app.get("/docs", include_in_schema=False)
99
- async def redirect_to_docs():
100
- return RedirectResponse(url="/docs/")
101
-
102
  @app.post("/agent/query")
103
  async def query_agent(request: QueryRequest):
104
  try:
105
- # Generate thread_id if not provided
106
  thread_id = request.thread_id or str(uuid.uuid4())
107
-
108
- # Execute the agent
109
  response = agent_executor.invoke({
110
- "input": request.query,
111
- "system_prompt": prompt_templates["system_prompt"]
112
  })
113
-
114
  return {
115
  "status": "success",
116
  "thread_id": thread_id,
@@ -119,3 +95,7 @@ async def query_agent(request: QueryRequest):
119
  except Exception as e:
120
  print(e)
121
  raise HTTPException(status_code=500, detail=str(e))
 
 
 
 
 
4
  from langchain_core.prompts import PromptTemplate
5
 
6
  from tools.visit_webpage import visit_webpage
7
+ from tools.time_tools import get_current_time
8
 
 
 
 
9
  import os
10
  import yaml
 
11
  import uuid
12
+ from typing import Optional, Dict, Any
13
  from pydantic import BaseModel, Field
14
 
15
  from fastapi import FastAPI, HTTPException
16
  from fastapi.middleware.cors import CORSMiddleware
17
+ import gradio as gr
18
+
19
+ from Gradio_UI import GradioUI
20
 
21
  # Initialize FastAPI app
22
  app = FastAPI(title="AI Assistant", description="AI Assistant with LangChain and Gradio")
 
45
  do_sample=True,
46
  return_full_text=False,
47
  model_kwargs={
48
+ "stop": ["Human:", "Assistant:", "Observation:"]
49
  }
50
  )
51
 
 
 
 
 
 
 
 
 
 
 
 
52
  # Load system prompt and template
53
  with open("prompts.yaml", 'r') as stream:
54
  prompt_templates = yaml.safe_load(stream)
55
 
56
+ # Create the ReAct prompt template with system prompt as a partial variable
57
+ prompt = PromptTemplate.from_template(
58
+ template=prompt_templates["template"],
59
+ partial_variables={"system_prompt": prompt_templates["system_prompt"]}
60
+ )
61
 
62
  # Create the agent
63
+ tools = [get_current_time, visit_webpage]
64
  agent = create_react_agent(
65
  llm=llm,
66
  tools=tools,
 
80
  context: Dict[str, Any] = Field(default_factory=dict)
81
 
82
  # API Routes
 
 
 
 
 
 
 
 
83
  @app.post("/agent/query")
84
  async def query_agent(request: QueryRequest):
85
  try:
 
86
  thread_id = request.thread_id or str(uuid.uuid4())
 
 
87
  response = agent_executor.invoke({
88
+ "input": request.query
 
89
  })
 
90
  return {
91
  "status": "success",
92
  "thread_id": thread_id,
 
95
  except Exception as e:
96
  print(e)
97
  raise HTTPException(status_code=500, detail=str(e))
98
+
99
+ # Create and mount the Gradio interface
100
+ gradio_ui = GradioUI(agent=agent_executor)
101
+ app = gr.mount_gradio_app(app, gradio_ui.create_interface(), path="")
main.py CHANGED
@@ -1,24 +1,15 @@
1
  import uvicorn
2
- from app import app, agent
3
- from UI import AgentUI
4
 
5
  def main():
6
  """
7
- Run FastAPI and Gradio UI on the same port
8
  """
9
- # Configuration
10
  port = 7860
 
11
 
12
- # Create and mount Gradio app
13
- gradio_ui = AgentUI(
14
- agent=agent,
15
- api_url=f"http://localhost:{port}"
16
- )
17
- app.mount("/", gradio_ui.get_gradio_app())
18
-
19
- # Start the combined app
20
- print(f"Starting combined server on port {port}...")
21
- uvicorn.run(app, host="0.0.0.0", port=port)
22
 
23
  if __name__ == "__main__":
24
  main()
 
1
import os

import uvicorn

from app import app


def main():
    """
    Run the FastAPI server with integrated Gradio UI.

    Defaults to 0.0.0.0:7860 (the port Hugging Face Spaces requires), but the
    HOST / PORT environment variables may override them for local development.
    """
    port = int(os.environ.get("PORT", "7860"))
    host = os.environ.get("HOST", "0.0.0.0")

    print(f"Starting server on {host}:{port}...")
    uvicorn.run(app, host=host, port=port)


if __name__ == "__main__":
    main()
requirements.txt CHANGED
@@ -3,7 +3,7 @@ smolagents
3
  requests>=2.31.0
4
  fastapi>=0.104.1
5
  uvicorn[standard]>=0.24.0
6
- gradio>=4.7.1
7
  langchain>=0.1.0
8
  langchain-core>=0.1.0
9
  langchain-community>=0.0.13
 
3
  requests>=2.31.0
4
  fastapi>=0.104.1
5
  uvicorn[standard]>=0.24.0
6
+ gradio
7
  langchain>=0.1.0
8
  langchain-core>=0.1.0
9
  langchain-community>=0.0.13
tools/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .time_tools import get_current_time
2
+ from .visit_webpage import visit_webpage
3
+
4
+ __all__ = ["get_current_time", "visit_webpage"]
tools/final_answer.py DELETED
@@ -1,14 +0,0 @@
1
from langchain_core.tools import tool
from typing import Any

@tool
def final_answer(answer: Any) -> Any:
    """Provides a final answer to the given problem.

    Args:
        answer: The final answer to the problem

    Returns:
        The final answer unchanged
    """
    # Identity pass-through: invoking this tool is the agent framework's
    # signal that the run is complete; the value itself is not transformed.
    return answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
tools/time_tools.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_core.tools import tool
import datetime
import pytz

@tool
def get_current_time(timezone: str = "UTC") -> str:
    """Get the current time in the specified timezone."""
    # Resolve the IANA zone name and format the current wall-clock time.
    # Any failure (most likely pytz.UnknownTimeZoneError for a bad zone name)
    # is returned as an error string so the agent sees it as a tool result
    # rather than an exception.
    try:
        now = datetime.datetime.now(pytz.timezone(timezone))
        return now.strftime("%Y-%m-%d %H:%M:%S %Z")
    except Exception as e:
        return f"Error: {str(e)}"