Commit d42ece6
Parent(s): 7c99a35

added all files

Files changed:
- .env +3 -0
- .gitignore +2 -0
- README.md +236 -14
- agno_kb.py +83 -0
- dynamic_agent.py +76 -0
- mcp_tools.py +202 -0
- requirements.txt +0 -0
- session_files/0c994316/0c994316-4985-4588-9557-3425ede97b70_Updated_Resume_VT.pdf +0 -0
.env
ADDED
@@ -0,0 +1,3 @@
NEBIUS_API_KEY = "eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDExMDgzMjI4NDU1OTcyMzc4OTEyMyIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNjgyMzA1OCwidXVpZCI6IjNmOTliMDMxLWRkNjUtNGYyMS1iZDE0LWViMTQyOTU0Nzg1MCIsIm5hbWUiOiJoYWNrYXRob24iLCJleHBpcmVzX2F0IjoiMjAzMC0wNi0wNFQxNzowNDoxOCswMDAwIn0.mJ1D7AFDdpRnsPDTk14xR0KSP_ND2cUA8DUuR3GevEk"
QDRANT_API_KEY = "EvKLIIodeepwz9P8WGsIAGnYgPVKmoIce9oaoxT65lA9G9MCa6keyQ"
QDRANT_URL = "https://2d9a7822-188b-4f81-ae14-b1c0fd4fbc6f.us-east4-0.gcp.cloud.qdrant.io:6333/"
.gitignore
ADDED
@@ -0,0 +1,2 @@
hackathon_venv
*__pycache__
README.md
CHANGED
@@ -1,14 +1,236 @@
# **NoCoMind - Dynamic & Customizable AI Agents**

Welcome to **NoCoMind - Dynamic & Customizable AI Agents** – a **no-code platform** designed
to help you create **smart, domain-adaptable AI agents** effortlessly. Whether you're in
**e-commerce (e.g. Flipkart)**, **HR**, **restaurant services**, or **enterprise analytics**, this tool
empowers you to build agents that understand and act on both **structured and unstructured data**.

## **How Our Product Covers the Hackathon Tracks**

### 🔧 **Track 1: MCP Tool / Server**
Our product seamlessly incorporates **custom-built MCP tools** — including a **SQL Query Engine**,
**Python Code Generator**, and **Data Visualization Tool** — within the **Gradio app**, enhancing
**LLM functionality** by enabling users to perform domain-specific tasks effortlessly through a
**no-code interface**.

### 🧩 **Track 2: Custom Components for Agents**
We utilize and customize **Gradio components** to create an intuitive **multi-step agent configuration flow**,
including **knowledge base upload pop-ups**, **dynamic tool toggles**, and **immutable agent status indicators**,
enhancing user experience and agent customization. Additionally, we've added customized **cloud deployment capabilities**
that allow users to deploy their configured agents directly to platforms like **GCP**, **AWS**, **Azure**, and **Gradio Cloud**,
enabling a seamless transition from **no-code setup** to **production-ready deployment** — all within the same interface.

### 🤖 **Track 3: Agentic Demo Showcase**
The core of our product is a **dynamic AI agent** that combines **structured and unstructured data
reasoning**, **tool orchestration**, and **real-time visualization generation**, showcasing how
**conversational agents** can deliver **complex insights** across multiple industries with minimal
setup.

## 🚀 **Key Features**

- 🧩 **Multi-Domain Support**
  Build agents tailored for any industry — from **HR** and **e-commerce** to **food services** and
  **data analytics**.

- 🧠 **Structured + Unstructured Data Handling**
  Your agent understands **databases** and **documents (PDFs, DOCs, etc.)** alike.

- 🔒 **Secure Cloud Storage**
  Structured data is stored with **encryption** and **access control**.

- 📄 **Unstructured Data Upload**
  Upload documents into a **searchable knowledge base** (e.g. company policies, product catalogs).

- 🛠 **Integrated Tools**
  - **SQL Query Engine**
  - **Python Code Generator**
  - **Data Visualization Tool**

- 🎨 **No-Code Chat Agent Builder**
  Intuitive UI for configuring agents **without any code**.

- 📊 **Smart Visual Insights**
  Get **instant charts and graphs** generated from your structured data — just ask!

- ☁️ **Cloud Deployment Ready**
  Deploy customized agents to platforms like **GCP, AWS, Azure, and Gradio Cloud**, demonstrating real-world deployment workflows for agent-based solutions.

## 🖥 **Interface Overview**

### 1. 📚 **Add Knowledge Base**
- Enter a **field name** (e.g. HR Policies, Menu Data)
- Upload files (**PDF, DOCX, TXT**, etc.)
- Hit **Save** to store the files
- ✅ **Status updates**: `"Knowledge Base Saved"`
- ❌ Use **Close** to return to the main screen

### 2. 🛠 **Tools Configuration**
- Shows a ✅ message: `"MCP Tools Integrated"`
- Displays the integrated tools:
  - **SQL Engine**
  - **Python Code Generator**
  - **Visualization Tool**
- 🔐 **Show/Hide Database toggle**:
  - View **structured cloud data** safely
  - **Privacy-first approach**

### 3. 🤖 **Add Agent Configuration**
- Set:
  - **Agent Name**
  - **Description**
  - **Instructions**
- Tools: **MCP Tools** are already pre-integrated
- ☑️ Enable **Knowledge Base usage**
- 💾 Hit **Save**
- ❌ Click **Close** to go back

### 4. ☁️ **Cloud Configuration**
- Choose a target:
  - **GCP**
  - **AWS**
  - **Azure**
  - **Gradio Cloud**
- **Confirm**
- **Close**

## ✅ **Status Overview**
Once each section is configured, a **status card** appears:
- 🟢 **Knowledge Base**: Added
- 🟢 **Agent**: Added
- ✅ **Immutable check-marked status indicators**
  *(once configured, the status can't be reversed)*

## 💬 **Chat Interface**
When setup is complete:
- Enter your **query**
- Click **Send**
- The agent dynamically:
  - Queries the **structured DB or KB**
  - Runs **Python logic** if needed
- Responds with:
  - **SQL results**
  - **Python-generated outputs**
  - **Smart visualizations**

## 🧠 **Example Use Cases**

### 🛍 **E-Commerce & Retail** (e.g., Flipkart, Amazon)
1. **Product Insights Agent**
   "Show me the top-selling categories this month."
2. **Inventory Forecasting Bot**
   "What items are likely to go out of stock next week?"
3. **Returns & Complaints Analyzer**
   "What's the most common reason for product returns?"
4. **Personalized Shopping Assistant**
   Recommends products based on past user behavior and weather.
5. **Competitor Analysis Agent**
   Extracts and summarizes info from scraped web pages or uploaded PDFs of competitor catalogs.

### 🔶 **Human Resources (HR)**
6. **Policy Query Agent**
   "How many casual leaves do I have?" (PDF-based KB)
7. **Salary Breakdown Explainer**
   "Explain my payslip — what is HRA?"
8. **Candidate Screening Bot**
   Upload resumes → ask: "Show top 5 candidates with Python + AWS experience."
9. **Onboarding Assistant**
   Automates Q&A for new hires using company manuals.
10. **Feedback Analyzer**
    "Summarize employee feedback from last quarter."

### 🟢 **Restaurants & Food Services**
11. **Weather-Aware Menu Bot**
    "Suggest dishes to highlight today based on rainy weather."
12. **Inventory Assistant**
    "Which ingredients are below reorder level?"
13. **Customer Review Analyzer**
    "Summarize common complaints from Google reviews."
14. **Reservation Agent**
    Pulls booking data and recommends optimal seating time-slots.
15. **Kitchen Prep Forecaster**
    Predicts high-demand dishes for the weekend.

### 🟣 **Healthcare & Hospitals**
16. **Patient History Summarizer**
    Upload reports → "Summarize the patient's last 3 visits."
17. **Drug Info Agent**
    "What are the side effects of Drug X?" (PDF KB from medical manuals)
18. **Doctor Recommendation Tool**
    "Find a specialist for diabetes + high BP."
19. **Appointment Optimizer**
    "Show me time slots with the lowest patient load."
20. **Medical Research Assistant**
    Upload research papers → ask for summaries and key points.

### 🟡 **Finance & Banking**
21. **Policy Lookup Bot**
    "What's the maturity period for Investment Plan Y?" (PDF product brochure)
22. **Loan Eligibility Analyzer**
    "Check if applicant A qualifies for a business loan."
23. **Expense Trend Analyzer**
    "What are my top 3 spending categories over the last 6 months?"
24. **Financial Document Reader**
    Ask questions over balance sheets and tax PDFs.
25. **Credit Risk Agent**
    "Analyze the customer's credit behavior and flag high-risk cases."

### 🧠 **Education & Training**
26. **Syllabus Assistant**
    Upload a curriculum → ask: "What topics are covered in Unit 3?"
27. **Mock Interview Bot**
    Trains candidates using context-aware question-answering.
28. **Course Recommendation Engine**
    Suggests courses based on student interests and an uploaded transcript.
29. **Quiz Generator**
    Upload a textbook → generate quiz questions automatically.
30. **Learning Progress Tracker**
    Summarizes performance trends from structured grade data.

### ⚙️ **Internal Business Operations**
31. **Meeting Notes Agent**
    Upload call transcripts → "Give me the action items discussed."
32. **Sales KPI Tracker**
    "Which region has the highest revenue drop?"
33. **Compliance Checker**
    Upload regulatory documents → "Highlight missing compliance clauses."
34. **SLA Monitoring Agent**
    "Which clients missed SLA thresholds last quarter?"
35. **Document Validator**
    Cross-checks uploaded contracts for specific clauses.

### 🧩 **Creative & Emerging Use Cases**
36. **Event Planning Assistant**
    Suggests vendors based on past event reports + current preferences.
37. **Smart Travel Concierge**
    Combines weather, preferences, and KB PDFs → personalized itineraries.
38. **Real Estate Bot**
    "Compare 3BHK listings by price per square foot in Mumbai."
39. **Legal Document Explainer**
    "Summarize this rental agreement in simple terms."
40. **Multilingual Assistant**
    Upload a document in one language, get responses in another.

## 🛡 **Data Security**
- ✅ **Structured data** is stored securely in the cloud
- ✅ **Zero data leaks**
- ✅ Uploaded **unstructured files** are used only for in-session processing

## ✨ **Why Use This Product?**
- **No-Code Simplicity**
  Build agents in minutes — no developers needed.
- **Domain Agnostic**
  Works across industries and data formats.
- **Conversational UX**
  Query structured or unstructured data like you're chatting with a human.
- **Integrated Intelligence**
  One tool for querying, scripting, and visualizing.

## 📦 **Getting Started**
1. **Launch the platform**
2. **Upload your Knowledge Base files**
3. **Configure tools**
4. **Add your custom agent**
5. **Start chatting and unlock insights instantly!**
6. **Deploy your AI agent to any of the supported cloud services**
agno_kb.py
ADDED
@@ -0,0 +1,83 @@
import os
import json
from pathlib import Path
from dotenv import load_dotenv
from agno.embedder.openai import OpenAIEmbedder
from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader
from agno.vectordb.qdrant import Qdrant
from agno.document.chunking.fixed import FixedSizeChunking

# Load environment variables
load_dotenv()

QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")

# embeddings = OpenAIEmbedder(
#     id="text-embedding-3-large",
#     dimensions=3072,
#     api_key=os.getenv("OPENAI_API_KEY")
# )

embeddings = OpenAIEmbedder(
    id="BAAI/bge-en-icl",
    dimensions=4096,
    api_key=os.getenv("NEBIUS_API_KEY"),
    base_url="https://api.studio.nebius.com/v1/"
)


class AgnoKnowledgeBase:
    def __init__(self, query: str, user_id: str, thread_id: str, agno_kb_config: dict,
                 chunk_size: int = 1000, num_documents: int = 6):
        self.query = query
        self.user_id = user_id
        self.thread_id = thread_id
        self.agno_kb_config = agno_kb_config
        self.chunk_size = chunk_size
        self.num_documents = num_documents

    def setup_knowledge_base(self):
        print(self.agno_kb_config)
        agno_kb_config = self.agno_kb_config['knowledge_base']
        input_data = agno_kb_config.get("input_data", {})
        sources = input_data.get("source", [])
        recreate = agno_kb_config.get("recreate", False)
        collection_name = agno_kb_config.get("collection_name")
        chunk_size = agno_kb_config.get("chunk_size")
        overlap = agno_kb_config.get("overlap")
        num_documents = agno_kb_config.get("num_documents")
        chunking_strategy_type = agno_kb_config.get("chunking_strategy", "fixed")

        if chunking_strategy_type == "fixed":
            chunking_strategy = FixedSizeChunking(chunk_size=chunk_size, overlap=overlap)
        else:
            raise ValueError(f"Unsupported chunking strategy: {chunking_strategy_type}")

        vector_db = Qdrant(
            collection=collection_name,
            embedder=embeddings,
            url=QDRANT_URL,
            api_key=QDRANT_API_KEY
        )

        for source in sources:
            paths = source.get("path", [])
            for path in paths:
                print(f"Loading PDF into Qdrant: {path}")
                knowledge_base = PDFKnowledgeBase(
                    path=path,
                    vector_db=vector_db,
                    reader=PDFReader(),
                    chunking_strategy=chunking_strategy,
                    num_documents=num_documents
                )
                knowledge_base.load(recreate=recreate)

        return PDFKnowledgeBase(
            path=None,
            vector_db=vector_db,
            reader=PDFReader(),
            chunking_strategy=chunking_strategy,
            num_documents=num_documents
        )
dynamic_agent.py
ADDED
@@ -0,0 +1,76 @@
import os
import traceback
from dotenv import load_dotenv
from agno.agent import Agent
from agno.storage.agent.sqlite import SqliteAgentStorage
from agno.memory.agent import AgentMemory
from agno.models.nebius import Nebius

load_dotenv()
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
DB_NAME = "hackathon.db"

storage = SqliteAgentStorage(table_name="hackathon_storage", db_file=DB_NAME)
memory = AgentMemory()

class AgentFactory:
    def __init__(self, user_id, thread_id, agent_config: dict, knowledge_base):
        self.user_id = user_id
        self.thread_id = thread_id
        self.agent_config = agent_config
        self.knowledge_base = knowledge_base

    async def routing_agent(self):
        try:
            routing_agent = Agent(
                model=Nebius(
                    id="meta-llama/Meta-Llama-3.1-405B-Instruct",
                    temperature=0,
                    api_key=NEBIUS_API_KEY,
                    base_url="https://api.studio.nebius.com/v1/"
                ),
                name="Routing Agent",
                description="You are a helpful routing assistant. This agent is responsible for routing the user's message to the appropriate agent. Based on the question it has to provide response. If question relates to 'plot', 'chart', 'graph', 'visualize', 'visualization', 'visual','bar chart', 'line chart', 'pie chart', 'scatter plot', 'histogram', 'heatmap', 'dashboard', 'show me', 'display', 'draw', 'create chart','generate plot', 'make graph', 'data visualization', 'analytics','trends', 'comparison chart', 'infographic'. If the question relates to the visualization like above key points then respond with 'visualization' else respond 'normal'.",
                instructions=[
                    "You should only respond with 'normal' or 'visualization'.",
                    "DO NOT add any delimiter between the response and the word 'normal' or 'visualization'.",
                    "Your response should be one word accordingly.",
                ],
                show_tool_calls=True,
                markdown=True,
                debug_mode=True
            )
            return routing_agent
        except Exception as e:
            print("Error creating routing agent:", traceback.format_exc())
            raise e

    async def normal_and_reasoning_agent(self, tools, model_name) -> Agent:
        try:
            agent = Agent(
                model=Nebius(
                    id=model_name,  # meta-llama/Meta-Llama-3.1-405B-Instruct  # Qwen/Qwen3-235B-A22B  # Qwen/Qwen3-30B-A3B
                    temperature=0,
                    api_key=NEBIUS_API_KEY,
                    base_url="https://api.studio.nebius.com/v1/"
                ),
                name=self.agent_config["name"],
                description=self.agent_config["description"],
                instructions=self.agent_config["instructions"],
                tools=tools,
                show_tool_calls=True,
                markdown=True,
                debug_mode=True,
                knowledge=self.knowledge_base,
                search_knowledge=True,
                storage=storage,
                memory=memory,
                user_id=self.user_id,
                add_history_to_messages=True,
                session_id=self.thread_id,
                num_history_responses=10
            )
            return agent
        except Exception as e:
            print("Error creating agent:", traceback.format_exc())
            raise e
mcp_tools.py
ADDED
@@ -0,0 +1,202 @@
import gradio as gr
import sqlite3
import traceback
import os
import re
import uuid
from agno.tools import tool
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import plotly.express as px

# --- DB Functions ---
def init_product_db():
    conn = sqlite3.connect("flipkart_mobiles.db")
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS mobiles (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            brand TEXT,
            color TEXT,
            model TEXT,
            memory TEXT,
            storage TEXT,
            rating REAL,
            selling_price REAL,
            original_price REAL
        )
    ''')
    conn.commit()
    conn.close()

def read_products():
    conn = sqlite3.connect("flipkart_mobiles.db")
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM mobiles")
    rows = cursor.fetchall()
    conn.close()
    return rows

# --- Tool Wrappers ---
DB_PATH = "flipkart_mobiles.db"
TABLE_NAME = "mobiles"

@tool(show_result=True, stop_after_tool_call=True)
def get_columns_info_from_database(columns: str = "*"):
    """
    Database Schema: brand, color, model, memory, storage, rating, selling_price, original_price
    Table: mobiles

    Query the 'mobiles' table selecting specified columns dynamically.

    Input:
    - columns: a comma-separated string of column names to select, e.g. "brand, model, rating"
      If "*", selects all columns.

    Returns:
    - Formatted string of rows with selected columns.
    """
    if columns.strip() != "*":
        if not re.fullmatch(r"[a-zA-Z0-9_,\s]+", columns):
            return "Invalid columns format."

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # Build query string dynamically
    query = f"SELECT {columns} FROM {TABLE_NAME}"

    try:
        cursor.execute(query)
        rows = cursor.fetchall()

        # Get column names from cursor description
        col_names = [desc[0] for desc in cursor.description]

        output_lines = []
        for row in rows:
            row_dict = dict(zip(col_names, row))
            formatted_row = ", ".join(f"{col}: {row_dict[col]}" for col in col_names)
            output_lines.append(formatted_row)

        return "\n".join(output_lines) if output_lines else "No rows found."
    except Exception as e:
        return f"Query error: {str(e)}"
    finally:
        conn.close()

@tool(show_result=True, stop_after_tool_call=True)
def generate_python_code(python_code: str) -> str:
    """
    You are a Python data scientist. Take the table and columns information from the chat history or agent memory.

    Your task is to generate a valid Python script from the following response.
    This table and columns information can be in raw English or structured format from the chat history or agent memory like:
    - user: task - description
    - tabular strings
    - JSON-like text
    - general descriptive statistics

    You must:
    1. Convert the data into a pandas DataFrame (use variable name `df`)
    2. Select an appropriate chart (bar chart, pie chart, line chart, etc.) based on the user's query
    3. Use matplotlib, seaborn, or plotly to plot. Any one of it to create the chart or graph or plot
    4. Save the chart using the variable `image_path` to a PNG file
    5. Return only the Python code — no comments, no markdown

    ### Rules:
    - Do not use `plt.show()` or any GUI renderer
    - Use clear axis labels and title
    - Save the figure using `plt.savefig(image_path)`
    - `df` must be used for all data manipulations
    - You must generate the full Python code block
    - execute that Python code and return the path to the saved image folder.
    - Create an image into the "plots" folder.

    Example code:
    ```python
    import pandas as pd
    import matplotlib.pyplot as plt

    data = [
        {"id": 1, "name": "Alice", "task": "NLP"},
        {"id": 2, "name": "Bob", "task": "Vision"},
        {"id": 3, "name": "Alice", "task": "NLP"}
    ]

    df = pd.DataFrame(data)
    task_counts = df["task"].value_counts()

    plt.figure(figsize=(6, 4))
    task_counts.plot(kind="bar", color="skyblue")
    plt.xlabel("Task")
    plt.ylabel("Count")
    plt.title("Task Distribution")
    plt.savefig(image_path)
    ```
    """
    return python_code

@tool(show_result=True, stop_after_tool_call=True)
def visualization_tool(python_code: str) -> str:
    """ This function is for taking the python code as input from chat history or agent memory and cleaning it accordingly so that it can be executed, then executing it and returning the image path.
    """
    try:
        cleaned_code = re.sub(r"^```(?:python)?|```$", "", python_code.strip(), flags=re.MULTILINE)
        image_path = f"plots/{uuid.uuid4().hex}.png"
        os.makedirs("plots", exist_ok=True)
        exec_context = {
            "pd": pd,
            "plt": plt,
            "sns": sns,
            "px": px,
            "image_path": image_path
        }
        exec(cleaned_code, exec_context)
        return image_path
    except Exception:
        return f"Error executing visualization code:\n{traceback.format_exc()}"

# --- Init DB ---
init_product_db()

# --- Define Toolkit ---
toolkit = [
    get_columns_info_from_database,
    generate_python_code,
    visualization_tool
]

# --- Gradio UI ---
tabbed = gr.TabbedInterface(
    interface_list=[
        gr.Interface(
            fn=get_columns_info_from_database.entrypoint,
            inputs=[
                gr.Textbox(label="Columns (comma separated, or * for all)", value="*")
            ],
            outputs=gr.Textbox(label="Query Result"),
            title="Query Products"
        ),
        gr.Interface(
            fn=generate_python_code.entrypoint,
            inputs=[
                gr.Textbox(label="Python code for Visualization", lines=10)
            ],
            outputs=gr.Textbox(label="Python Code for Visualization"),
            title="Python Code Generation"
        ),
        gr.Interface(
            fn=visualization_tool.entrypoint,
            inputs=[
                gr.Textbox(label="Visualization", lines=10)
            ],
            outputs=gr.Textbox(label="Saved Image Path"),
            title="Auto Visualization"
        )
    ],
    tab_names=["Query Products", "Python Code Generation", "Auto Visualization"]
)

tabbed.launch(mcp_server=True)
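
A small smoke-test sketch for the three tools, calling them through `.entrypoint` exactly as the Gradio interfaces above do. It is meant to be dropped into this module just above the `tabbed.launch(...)` call (importing `mcp_tools` from another script would also trigger the launch); the hard-coded plotting snippet stands in for code the model would normally produce via `generate_python_code`.

```python
# Sketch: place above tabbed.launch(...) to exercise the tools directly.
if __name__ == "__main__":
    # 1. Structured query over the mobiles table.
    print(get_columns_info_from_database.entrypoint("brand, model, selling_price"))

    # 2. A plotting snippet (normally authored by the model); it must build `df`
    #    and save the figure to `image_path`, per the generate_python_code rules.
    snippet = (
        "import pandas as pd\n"
        "import matplotlib.pyplot as plt\n"
        "df = pd.DataFrame({'brand': ['A', 'B'], 'selling_price': [12000, 18000]})\n"
        "plt.figure(figsize=(6, 4))\n"
        "plt.bar(df['brand'], df['selling_price'])\n"
        "plt.xlabel('Brand')\n"
        "plt.ylabel('Selling price')\n"
        "plt.title('Selling price per brand')\n"
        "plt.savefig(image_path)\n"
    )
    code = generate_python_code.entrypoint(snippet)

    # 3. Execute the snippet in the exec context prepared by visualization_tool
    #    and print the saved PNG path.
    print(visualization_tool.entrypoint(code))
```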
requirements.txt
ADDED
Binary file (530 Bytes)
session_files/0c994316/0c994316-4985-4588-9557-3425ede97b70_Updated_Resume_VT.pdf
ADDED
Binary file (87.7 kB)