Spaces:
Runtime error
Runtime error
Cline
committed on
Commit
·
0af0a55
0
Parent(s):
Initial commit
Browse files- README.md +34 -0
- agents/__pycache__/central_ai_hub_test.cpython-311-pytest-7.4.3.pyc +0 -0
- agents/__pycache__/central_ai_hub_test.cpython-39.pyc +0 -0
- agents/__pycache__/code_analysis_agent.cpython-312.pyc +0 -0
- agents/__pycache__/code_analysis_agent.cpython-39.pyc +0 -0
- agents/__pycache__/code_generation_agent.cpython-312.pyc +0 -0
- agents/__pycache__/code_generation_agent.cpython-39.pyc +0 -0
- agents/__pycache__/error_fixing_agent.cpython-312.pyc +0 -0
- agents/__pycache__/error_fixing_agent.cpython-39.pyc +0 -0
- agents/__pycache__/planning_agent.cpython-312.pyc +0 -0
- agents/__pycache__/planning_agent.cpython-39.pyc +0 -0
- agents/__pycache__/q_learning_agent.cpython-312.pyc +0 -0
- agents/__pycache__/q_learning_agent.cpython-39.pyc +0 -0
- agents/__pycache__/reasoning_agent.cpython-312.pyc +0 -0
- agents/__pycache__/reasoning_agent.cpython-39.pyc +0 -0
- agents/__pycache__/testing_agent.cpython-312.pyc +0 -0
- agents/__pycache__/testing_agent.cpython-39.pyc +0 -0
- agents/__pycache__/web_browsing_agent.cpython-312.pyc +0 -0
- agents/__pycache__/web_browsing_agent.cpython-39.pyc +0 -0
- agents/central_ai_hub_test.py +59 -0
- agents/code_analysis_agent.py +182 -0
- agents/code_generation_agent.py +47 -0
- agents/error_fixing_agent.py +49 -0
- agents/planning_agent.py +86 -0
- agents/q_learning_agent.py +72 -0
- agents/reasoning_agent.py +135 -0
- agents/setup_environment.sh +39 -0
- agents/test_runner.py +36 -0
- agents/testing_agent.py +91 -0
- agents/web_browsing_agent.py +211 -0
- app.py +47 -0
- core/__pycache__/central_ai_hub.cpython-311.pyc +0 -0
- core/__pycache__/central_ai_hub.cpython-312.pyc +0 -0
- core/__pycache__/central_ai_hub.cpython-39.pyc +0 -0
- core/central_ai_hub.py +154 -0
- core/knowledge_management_layer.py +254 -0
- logs/code_analysis_agent.log +30 -0
- requirements.txt +4 -0
- temp_analysis_script.py +11 -0
README.md
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Central AI Hub
|
2 |
+
|
3 |
+
This is a Hugging Face Space for the Central AI Hub.
|
4 |
+
|
5 |
+
## How to Run
|
6 |
+
|
7 |
+
1. Make sure you have the required dependencies installed. You can install them using:
|
8 |
+
|
9 |
+
```bash
|
10 |
+
pip install -r requirements.txt
|
11 |
+
```
|
12 |
+
2. To run the space, execute the following command:
|
13 |
+
|
14 |
+
```bash
|
15 |
+
python app.py
|
16 |
+
```
|
17 |
+
|
18 |
+
This will start the Gradio interface, which you can access in your browser.
|
19 |
+
|
20 |
+
3. You can optionally provide a local path to the Qwen2.5-14B GGUF model by setting the `LOCAL_MODEL_PATH` environment variable. For example:
|
21 |
+
|
22 |
+
```bash
|
23 |
+
LOCAL_MODEL_PATH=/path/to/your/model.gguf python app.py
|
24 |
+
```
|
25 |
+
|
26 |
+
If the `LOCAL_MODEL_PATH` environment variable is not set, the model will be downloaded from Hugging Face Hub and cached in the `.cache` directory.
|
27 |
+
|
28 |
+
## Usage
|
29 |
+
|
30 |
+
1. Select a task type from the dropdown menu.
|
31 |
+
2. Enter the task content in the text box.
|
32 |
+
3. Enter any task requirements, separated by commas.
|
33 |
+
4. Click the "Submit Task" button.
|
34 |
+
5. The task status will be displayed in the output text box.
|
agents/__pycache__/central_ai_hub_test.cpython-311-pytest-7.4.3.pyc
ADDED
Binary file (9.18 kB). View file
|
|
agents/__pycache__/central_ai_hub_test.cpython-39.pyc
ADDED
Binary file (5.2 kB). View file
|
|
agents/__pycache__/code_analysis_agent.cpython-312.pyc
ADDED
Binary file (10.9 kB). View file
|
|
agents/__pycache__/code_analysis_agent.cpython-39.pyc
ADDED
Binary file (6.5 kB). View file
|
|
agents/__pycache__/code_generation_agent.cpython-312.pyc
ADDED
Binary file (2.64 kB). View file
|
|
agents/__pycache__/code_generation_agent.cpython-39.pyc
ADDED
Binary file (1.97 kB). View file
|
|
agents/__pycache__/error_fixing_agent.cpython-312.pyc
ADDED
Binary file (2.7 kB). View file
|
|
agents/__pycache__/error_fixing_agent.cpython-39.pyc
ADDED
Binary file (1.96 kB). View file
|
|
agents/__pycache__/planning_agent.cpython-312.pyc
ADDED
Binary file (4.87 kB). View file
|
|
agents/__pycache__/planning_agent.cpython-39.pyc
ADDED
Binary file (3.58 kB). View file
|
|
agents/__pycache__/q_learning_agent.cpython-312.pyc
ADDED
Binary file (4.45 kB). View file
|
|
agents/__pycache__/q_learning_agent.cpython-39.pyc
ADDED
Binary file (3.41 kB). View file
|
|
agents/__pycache__/reasoning_agent.cpython-312.pyc
ADDED
Binary file (6.02 kB). View file
|
|
agents/__pycache__/reasoning_agent.cpython-39.pyc
ADDED
Binary file (4.23 kB). View file
|
|
agents/__pycache__/testing_agent.cpython-312.pyc
ADDED
Binary file (4.65 kB). View file
|
|
agents/__pycache__/testing_agent.cpython-39.pyc
ADDED
Binary file (3.04 kB). View file
|
|
agents/__pycache__/web_browsing_agent.cpython-312.pyc
ADDED
Binary file (12.4 kB). View file
|
|
agents/__pycache__/web_browsing_agent.cpython-39.pyc
ADDED
Binary file (7.35 kB). View file
|
|
agents/central_ai_hub_test.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
import pytest
import asyncio
from core.central_ai_hub import CentralAIHub
from utils.llm_orchestrator import LLMOrchestrator


@pytest.fixture
async def hub():
    """Provide a started CentralAIHub and shut it down after the test.

    NOTE(review): an async generator fixture normally needs pytest-asyncio's
    fixture handling to be awaited correctly -- confirm the plugin config.
    """
    central_hub = CentralAIHub(os.getenv('LLM_API_KEY'))
    await central_hub.start()
    yield central_hub
    await central_hub.shutdown()


@pytest.mark.asyncio
async def test_system_initialization(hub):
    """The hub must expose a live LLM orchestrator and at least one agent."""
    assert hub.llm_orchestrator is not None
    assert isinstance(hub.llm_orchestrator, LLMOrchestrator)
    assert len(hub.agents) > 0


@pytest.mark.asyncio
async def test_task_delegation(hub):
    """Delegating a code-analysis task yields a trackable task id."""
    task = dict(
        type='code_analysis',
        content='def hello(): print("Hello, World!")',
        requirements=['code understanding', 'static analysis'],
    )
    task_id = await hub.delegate_task(task)
    assert task_id is not None
    status = await hub.get_task_status(task_id)
    assert status['status'] in ('active', 'completed')


@pytest.mark.asyncio
async def test_agent_selection(hub):
    """The hub picks one of its registered agents for a generation task."""
    task = dict(
        type='code_generation',
        content='Create a simple REST API',
        requirements=['code generation', 'API design'],
    )
    agent_id = await hub.select_agent(task)
    assert agent_id in hub.agents


@pytest.mark.asyncio
async def test_error_handling(hub):
    """Unknown agents and malformed tasks raise the expected errors."""
    with pytest.raises(ValueError):
        await hub.initialize_agent('non_existent_agent')

    with pytest.raises(Exception):
        await hub.delegate_task(None)


if __name__ == '__main__':
    pytest.main(['-v', 'central_ai_hub_test.py'])
|
agents/code_analysis_agent.py
ADDED
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import ast
|
2 |
+
from typing import Dict, Any, List
|
3 |
+
from loguru import logger
|
4 |
+
import os
|
5 |
+
import re
|
6 |
+
|
7 |
+
|
8 |
+
class CodeAnalysisAgent:
    """Static-analysis agent for Python source files and repositories.

    Walks a repository for ``.py`` files and reports, per file: function and
    class names, a simple cyclomatic-complexity score, regex-based
    vulnerability findings, code smells, and potential bugs.  All analysis
    is AST- or regex-based via the stdlib ``ast``/``re`` modules.
    """

    def __init__(self):
        """Initialize the Code Analysis Agent."""
        logger.info("Initializing CodeAnalysisAgent")
        # Capability tags advertised to the hub's agent-selection logic.
        self.capabilities = [
            "code_analysis",
            "static_analysis",
            "repository_analysis",
            "code_summarization",
            "vulnerability_detection",
            "debugging",
            "bug_detection",
            "code_smell_detection"
        ]
        self.setup_logger()

    async def find_bugs(self, file_path: str) -> List[str]:
        """Find potential bugs in a code file using static analysis.

        Currently detects ``open(...)`` calls that are not managed by a
        ``with`` statement.  Errors are logged and yield a partial/empty
        list rather than raising.
        """
        logger.info(f"Finding bugs in file {file_path}")
        bugs = []
        try:
            with open(file_path, "r") as f:
                source_code = f.read()

            tree = ast.parse(source_code)

            # Example: Find unclosed files.
            # Collect every node that appears inside a `with` item's context
            # expression, so `with open(...) as f:` is not flagged.
            # (The previous check walked ast.walk(node) on the call itself,
            # which can never contain the enclosing With statement, so every
            # open() call was reported.)
            managed_nodes = set()
            for n in ast.walk(tree):
                if isinstance(n, ast.With):
                    for item in n.items:
                        managed_nodes.update(ast.walk(item.context_expr))

            for node in ast.walk(tree):
                if (isinstance(node, ast.Call)
                        and isinstance(node.func, ast.Name)
                        and node.func.id == 'open'
                        and node not in managed_nodes):
                    bugs.append(
                        f"Line {node.lineno}: Potential unclosed file")

            # Add more bug detection patterns here

        except Exception as e:
            logger.error(f"Error finding bugs in file {file_path}: {str(e)}")

        return bugs

    def setup_logger(self):
        """Configure logging for the agent."""
        logger.add("logs/code_analysis_agent.log", rotation="500 MB")

    async def analyze_repository(self, repo_path: str) -> Dict[str, Any]:
        """Analyze a code repository.

        Returns ``{"status": "success", "results": [...], "summary": {...}}``
        or ``{"status": "error", "message": ...}`` on failure.
        """
        logger.info(f"Analyzing repository at {repo_path}")
        try:
            code_files = self.collect_code_files(repo_path)
            analysis_results = []

            for file_path in code_files:
                analysis_results.append(await self.analyze_file(file_path))

            summary = self.generate_repository_summary(analysis_results)
            logger.info(f"Finished analyzing repository at {repo_path}")
            return {
                "status": "success",
                "results": analysis_results,
                "summary": summary
            }
        except Exception as e:
            logger.error(f"Error analyzing repository {repo_path}: {str(e)}")
            return {
                "status": "error",
                "message": str(e)
            }

    def collect_code_files(self, repo_path: str) -> List[str]:
        """Collect all code files in a repository."""
        code_files = []
        for root, _, files in os.walk(repo_path):
            for file in files:
                # Currently only supports Python files.
                if file.endswith(".py"):
                    code_files.append(os.path.join(root, file))
        return code_files

    async def analyze_file(self, file_path: str) -> Dict[str, Any]:
        """Analyze a single code file.

        Returns a per-file report dict; on failure returns
        ``{"file_path": ..., "error": ...}`` instead of raising.
        """
        logger.info(f"Analyzing file {file_path}")
        try:
            with open(file_path, "r") as f:
                source_code = f.read()

            # Basic static analysis using AST.
            tree = ast.parse(source_code)

            # Extract functions and classes.
            functions = [node.name for node in ast.walk(tree)
                         if isinstance(node, ast.FunctionDef)]
            classes = [node.name for node in ast.walk(tree)
                       if isinstance(node, ast.ClassDef)]

            # Basic complexity analysis (Cyclomatic Complexity).
            complexity = self.calculate_complexity(tree)

            # Identify potential vulnerabilities (basic example).
            vulnerabilities = self.detect_vulnerabilities(source_code)

            return {
                "file_path": file_path,
                "functions": functions,
                "classes": classes,
                "complexity": complexity,
                "vulnerabilities": vulnerabilities
            }
        except Exception as e:
            logger.error(f"Error analyzing file {file_path}: {str(e)}")
            return {
                "file_path": file_path,
                "error": str(e)
            }

    def calculate_complexity(self, tree: ast.AST) -> int:
        """Calculate the Cyclomatic Complexity of a code snippet.

        Counts 1 plus one per branching/looping/with/try construct.
        """
        complexity = 1
        for node in ast.walk(tree):
            if isinstance(node, (ast.If, ast.While, ast.For,
                                 ast.AsyncFor, ast.With, ast.AsyncWith, ast.Try)):
                complexity += 1
        return complexity

    def detect_vulnerabilities(self, source_code: str) -> List[str]:
        """Detect potential vulnerabilities in the code (basic example)."""
        vulnerabilities = []

        # Detect SQL injection patterns.
        if re.search(r"SELECT \* FROM .* WHERE .*", source_code):
            vulnerabilities.append("Potential SQL injection vulnerability")

        # Detect hardcoded credentials.
        if re.search(r"password\s*=\s*['\"].*['\"]",
                     source_code, re.IGNORECASE):
            vulnerabilities.append("Potential hardcoded credentials")

        return vulnerabilities

    def detect_code_smells(self, tree: ast.AST) -> List[str]:
        """Detect potential code smells in the code."""
        code_smells = []
        # Example: Long function (> 50 top-level statements in the body).
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef) and len(node.body) > 50:
                code_smells.append(f"Line {node.lineno}: Long function")
        # Example: Large class (> 100 top-level statements in the body).
        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef) and len(node.body) > 100:
                code_smells.append(f"Line {node.lineno}: Large class")
        return code_smells

    def generate_repository_summary(
            self, analysis_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Generate a summary of the repository analysis.

        Aggregates counts over per-file reports; files that failed analysis
        simply contribute zero to each total.
        """
        total_files = len(analysis_results)
        total_functions = sum(len(result.get("functions", []))
                              for result in analysis_results)
        total_classes = sum(len(result.get("classes", []))
                            for result in analysis_results)
        # Guard against division by zero for an empty repository.
        average_complexity = (
            sum(result.get("complexity", 0) for result in analysis_results) /
            total_files if total_files > 0 else 0)
        total_vulnerabilities = sum(
            len(result.get("vulnerabilities", [])) for result in analysis_results)

        return {
            "total_files": total_files,
            "total_functions": total_functions,
            "total_classes": total_classes,
            "average_complexity": average_complexity,
            "total_vulnerabilities": total_vulnerabilities
        }
|
agents/code_generation_agent.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, Any
|
2 |
+
from loguru import logger
|
3 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
4 |
+
|
5 |
+
|
6 |
+
class CodeGenerationAgent:
    """Agent that turns natural-language descriptions into source code via an LLM."""

    def __init__(self, llm_api_key: str):
        """Initialize the Code Generation Agent."""
        logger.info("Initializing CodeGenerationAgent")
        self.llm_orchestrator = LLMOrchestrator(llm_api_key)
        # Capability tags advertised to the hub's agent-selection logic.
        self.capabilities = [
            "code_generation",
            "code_completion",
            "code_modification",
            "code_refactoring"
        ]
        self.setup_logger()

    def setup_logger(self):
        """Configure logging for the agent."""
        logger.add("logs/code_generation_agent.log", rotation="500 MB")

    async def generate_code(self, description: str,
                            language: str = "python") -> Dict[str, Any]:
        """Generate code based on a natural language description.

        Returns ``{"status": "success", "code": ...}`` on success, or
        ``{"status": "error", "message": ...}`` if the LLM call fails.
        """
        logger.info(f"Generating code for description: {description}")
        try:
            prompt = f"""
            Generate {language} code that accomplishes the following:
            {description}

            Provide only the code, without any introductory or explanatory text.
            """
            generated = await self.llm_orchestrator.generate_completion(prompt)

            logger.info(f"Code generated successfully:\n{generated}")
            return {"status": "success", "code": generated}

        except Exception as exc:
            logger.error(f"Error generating code: {str(exc)}")
            return {"status": "error", "message": str(exc)}
|
agents/error_fixing_agent.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, Any, List
|
2 |
+
from loguru import logger
|
3 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
4 |
+
|
5 |
+
|
6 |
+
class ErrorFixingAgent:
    """Agent that asks an LLM to repair code given a list of error messages."""

    def __init__(self, llm_api_key: str):
        """Initialize the Error Fixing Agent."""
        logger.info("Initializing ErrorFixingAgent")
        self.llm_orchestrator = LLMOrchestrator(llm_api_key)
        # Capability tags advertised to the hub's agent-selection logic.
        self.capabilities = [
            "error_fixing",
            "patch_generation",
            "code_repair"
        ]
        self.setup_logger()

    def setup_logger(self):
        """Configure logging for the agent."""
        logger.add("logs/error_fixing_agent.log", rotation="500 MB")

    async def fix_errors(
            self, code: str, error_messages: List[str]) -> Dict[str, Any]:
        """Generate code patches to fix detected errors.

        Returns ``{"status": "success", "fixed_code": ...}`` on success, or
        ``{"status": "error", "message": ...}`` if the LLM call fails.
        """
        logger.info(f"Attempting to fix errors: {error_messages}")
        try:
            prompt = f"""
            Fix the following errors in the code:
            {code}

            Errors:
            {chr(10).join(error_messages)}

            Provide only the corrected code, without any introductory or explanatory text.
            """
            patched = await self.llm_orchestrator.generate_completion(prompt)

            logger.info(f"Code with fixes generated:\n{patched}")
            return {"status": "success", "fixed_code": patched}

        except Exception as exc:
            logger.error(f"Error fixing code: {str(exc)}")
            return {"status": "error", "message": str(exc)}
|
agents/planning_agent.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, Any, List
|
2 |
+
from loguru import logger
|
3 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
4 |
+
|
5 |
+
|
6 |
+
class PlanningAgent:
    """Agent that decomposes a high-level goal into an agent-assigned task plan."""

    def __init__(self, llm_api_key: str):
        """Initialize the Planning Agent."""
        logger.info("Initializing PlanningAgent")
        self.llm_orchestrator = LLMOrchestrator(llm_api_key)
        # Capability tags advertised to the hub's agent-selection logic.
        self.capabilities = [
            "task_planning",
            "goal_decomposition",
            "plan_refinement",
            "task_prioritization"
        ]
        self.setup_logger()

    def setup_logger(self):
        """Configure logging for the agent."""
        logger.add("logs/planning_agent.log", rotation="500 MB")

    async def generate_plan(
            self, goal: str, available_agents: List[str]) -> Dict[str, Any]:
        """Generate a task plan based on a high-level goal.

        Returns ``{"status": "success", "plan": [...]}`` where each step is a
        dict with ``task``/``agent``/``input``/``reasoning`` keys, or
        ``{"status": "error", "message": ...}`` if the LLM call fails.
        """
        logger.info(f"Generating plan for goal: {goal}")
        try:
            prompt = f"""
            You are an expert planner. Generate a detailed task plan to achieve the following goal:
            Goal: {goal}

            Available agents: {', '.join(available_agents)}

            Think step-by-step and explain your reasoning for each step.
            The plan should be a list of steps, each with:
            - A clear description of the task.
            - The agent best suited to execute the task.
            - Any necessary input or parameters for the task.

            Example:
            1. Task: Summarize the latest news on topic X.
               Agent: web_browsing_agent
               Input: topic=X
               Reasoning: To get the latest news, we need to use the web_browsing_agent to search for news on topic X.
            2. Task: Analyze the sentiment of the news summary.
               Agent: data_analysis_agent
               Input: summary from step 1
               Reasoning: To analyze the sentiment, we can use the data_analysis_agent to process the summary from the previous step.
            """
            plan_str = await self.llm_orchestrator.generate_completion(prompt)
            plan = self.parse_plan(plan_str)
            logger.info(f"Plan generated successfully: {plan}")
            return {
                "status": "success",
                "plan": plan
            }

        except Exception as e:
            logger.error(f"Error generating plan: {str(e)}")
            return {
                "status": "error",
                "message": str(e)
            }

    def parse_plan(self, plan_str: str) -> List[Dict[str, Any]]:
        """Parse the plan generated by the LLM into a structured format.

        A numbered line ("1." .. "9.") starts a new step; subsequent
        "Agent:"/"Input:"/"Reasoning:" lines annotate the current step.
        """
        plan = []
        current_step = {}

        for line in plan_str.strip().split("\n"):
            if line.startswith(tuple(f"{i}." for i in range(1, 10))):
                if current_step:
                    plan.append(current_step)
                # Tolerate numbered lines that omit the "Task: " label instead
                # of raising IndexError; fall back to the raw line.  maxsplit=1
                # keeps any later occurrence of the label inside the text.
                if "Task: " in line:
                    current_step = {"task": line.split("Task: ", 1)[1]}
                else:
                    current_step = {"task": line}
            elif "Agent: " in line:
                current_step["agent"] = line.split("Agent: ", 1)[1]
            elif "Input: " in line:
                current_step["input"] = line.split("Input: ", 1)[1]
            elif "Reasoning: " in line:
                current_step["reasoning"] = line.split("Reasoning: ", 1)[1]

        if current_step:
            plan.append(current_step)

        return plan
|
agents/q_learning_agent.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import random
|
3 |
+
from loguru import logger
|
4 |
+
from typing import Dict, Any, List
|
5 |
+
|
6 |
+
|
7 |
+
class QLearningAgent:
    """Tabular Q-learning agent with an epsilon-greedy action policy."""

    def __init__(self, learning_rate: float = 0.1,
                 discount_factor: float = 0.9, exploration_rate: float = 0.1):
        """Initialize the Q-Learning Agent."""
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        # Q-table: serialized state -> {action: Q-value}.
        self.q_table = {}
        self.setup_logger()

    def setup_logger(self):
        """Configure logging for the agent."""
        logger.add("logs/q_learning_agent.log", rotation="500 MB")

    def serialize_state(self, state: Dict[str, Any]) -> str:
        """Serialize the state into a string representation for use as a dictionary key."""
        return str(state)

    def get_q_value(self, state: Dict[str, Any], action: str) -> float:
        """Get the Q-value for a given state-action pair (0.0 if unseen)."""
        row = self.q_table.setdefault(self.serialize_state(state), {})
        return row.get(action, 0.0)

    def set_q_value(self, state: Dict[str, Any], action: str, value: float):
        """Set the Q-value for a given state-action pair."""
        self.q_table.setdefault(self.serialize_state(state), {})[action] = value

    def choose_action(self, state: Dict[str, Any],
                      available_actions: List[str]) -> str:
        """Choose an action epsilon-greedily from the available actions."""
        if random.random() < self.exploration_rate:
            # Explore: pick uniformly at random.
            return random.choice(available_actions)
        # Exploit: pick among the actions tied for the highest Q-value.
        scored = [(self.get_q_value(state, act), act)
                  for act in available_actions]
        top = max(q for q, _ in scored)
        return random.choice([act for q, act in scored if q == top])

    def update_q_table(self, state: Dict[str, Any], action: str,
                       reward: float, next_state: Dict[str, Any], next_actions: List[str]):
        """Apply the standard Q-learning update for an observed transition."""
        old_q = self.get_q_value(state, action)
        best_next = max((self.get_q_value(next_state, nxt)
                         for nxt in next_actions), default=0)
        # Q <- Q + lr * (reward + gamma * max_a' Q(s', a') - Q)
        delta = reward + self.discount_factor * best_next - old_q
        self.set_q_value(state, action, old_q + self.learning_rate * delta)
        logger.info(
            f"Q-table updated for state-action pair: ({self.serialize_state(state)}, {action})")
|
agents/reasoning_agent.py
ADDED
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, Any, List
|
2 |
+
from loguru import logger
|
3 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
4 |
+
|
5 |
+
|
6 |
+
class ReasoningAgent:
    """Agent that drives an iterative, LLM-guided step-by-step reasoning loop."""

    def __init__(self, llm_api_key: str):
        """Initialize the Reasoning Agent."""
        logger.info("Initializing ReasoningAgent")
        self.llm_orchestrator = LLMOrchestrator(llm_api_key)
        # Capability tags advertised to the hub's agent-selection logic.
        self.capabilities = [
            "step_by_step_reasoning",
            "context_management",
            "agent_coordination",
            "result_aggregation"
        ]
        self.setup_logger()

    def setup_logger(self):
        """Configure logging for the agent."""
        logger.add("logs/reasoning_agent.log", rotation="500 MB")

    async def perform_reasoning(
            self, goal: str, available_agents: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Perform step-by-step reasoning to achieve a goal.

        Runs a fixed two-iteration loop: prompt the LLM, extract a candidate
        action, and (when the action targets another agent) simulate its
        execution.  Returns the accumulated step trace, or an error dict.
        """
        logger.info(f"Performing reasoning for goal: {goal}")
        try:
            context = {
                "goal": goal,
                "available_agents": available_agents,
                "steps": []
            }

            # Example of a simple reasoning process with 2 steps.
            # This can be made more sophisticated based on the specific needs.
            for step in range(2):
                prompt = self.generate_reasoning_prompt(context)
                response = await self.llm_orchestrator.generate_completion(prompt)

                logger.info(f"Reasoning step {step + 1}: {response}")

                # Placeholder for action execution based on reasoning.
                action = self.extract_action(response)

                if action and action["agent"] != "reasoning_agent":
                    # Here we simulate executing an action with another agent.
                    # In a real scenario, this would involve calling the
                    # appropriate agent.
                    action_result = await self.execute_agent_action(action, context)
                    context["steps"].append({
                        "step": step + 1,
                        "prompt": prompt,
                        "response": response,
                        "action": action,
                        "action_result": action_result
                    })
                else:
                    context["steps"].append({
                        "step": step + 1,
                        "prompt": prompt,
                        "response": response,
                        "action": action
                    })

            return {
                "status": "success",
                "reasoning_process": context["steps"],
                "result": "Reasoning process completed."  # Placeholder for final result
            }

        except Exception as e:
            logger.error(f"Error during reasoning: {str(e)}")
            return {
                "status": "error",
                "message": str(e)
            }

    def generate_reasoning_prompt(self, context: Dict[str, Any]) -> str:
        """Generate a prompt for the LLM to guide the reasoning process."""
        prompt = f"""
        Goal: {context['goal']}
        Available Agents: {', '.join([agent['name'] for agent in context['available_agents']])}

        Reasoning Steps:
        """

        # Replay the trace so far so the LLM sees prior steps and results.
        for step in context["steps"]:
            prompt += f"- Step {step['step']}: {step['response']}\n"
            if "action" in step and step["action"]:
                prompt += f"  Action: {step['action']}\n"
            if "action_result" in step and step["action_result"]:
                prompt += f"  Result: {step['action_result']}\n"

        prompt += "What is the next logical step to achieve the goal? Explain your reasoning."

        return prompt

    def extract_action(self, response: str) -> Dict[str, Any]:
        """Extract the next action to be taken based on the LLM's response.

        Basic implementation: the last line is assumed to hold
        ``agent: parameters``.  Returns None when no action is present.
        """
        lines = response.strip().split("\n")
        last_line = lines[-1]
        if ":" in last_line:
            # Split only on the FIRST colon so parameter text containing
            # colons (URLs, key: value pairs) is preserved intact.  The old
            # split(":") kept only the first segment after the agent name.
            parts = last_line.split(":", 1)
            agent = parts[0].strip()
            parameters = parts[1].strip() if len(parts) > 1 else ""
            return {
                "agent": agent,
                "parameters": parameters
            }
        else:
            return None

    async def execute_agent_action(
            self, action: Dict[str, Any], context: Dict[str, Any]) -> str:
        """Simulate executing an action with another agent.

        This is a placeholder for actual agent execution.  In a real scenario,
        this method would call the agent named by ``action["agent"]`` with
        ``action["parameters"]``.
        """
        # Find the agent in the available agents list.
        agent_info = next(
            (agent for agent in context["available_agents"]
             if agent["name"] == action["agent"]),
            None)

        if agent_info:
            logger.info(
                f"Executing action with agent: {action['agent']} with parameters: {action['parameters']}")
            # Simulate an action result.
            return f"Result of action with {action['agent']}: Success"
        else:
            logger.error(f"Agent {action['agent']} not found.")
            return "Error: Agent not found"
|
agents/setup_environment.sh
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash

# setup_environment.sh
# Creates the "agentic_ai" conda environment, installs requirements, and
# installs activate/deactivate hooks that export/unset API keys.

set -e  # Abort on the first failing command

echo "Setting up Agentic AI Environment..."

# Create and activate conda environment
conda create -n agentic_ai python=3.9 -y
source activate agentic_ai

# Install requirements
pip install -r requirements.txt

# The hook directories live inside the environment prefix; fail loudly if
# activation did not set CONDA_PREFIX.
if [ -z "$CONDA_PREFIX" ]; then
    echo "ERROR: CONDA_PREFIX is not set; conda activation failed." >&2
    exit 1
fi

# Create conda env hook directories (quoted in case the prefix has spaces)
mkdir -p "$CONDA_PREFIX/etc/conda/activate.d"
mkdir -p "$CONDA_PREFIX/etc/conda/deactivate.d"

# Create activation script ('EOL' is quoted so nothing expands at write time)
cat > "$CONDA_PREFIX/etc/conda/activate.d/env_vars.sh" << 'EOL'
#!/bin/bash
export OPENAI_API_KEY='your-openai-key-here'
export SERPAPI_KEY='your-serpapi-key-here'
# Add more API keys as needed
EOL

# Create deactivation script
cat > "$CONDA_PREFIX/etc/conda/deactivate.d/env_vars.sh" << 'EOL'
#!/bin/bash
unset OPENAI_API_KEY
unset SERPAPI_KEY
# Add more unset commands for additional API keys
EOL

# Make scripts executable
chmod +x "$CONDA_PREFIX/etc/conda/activate.d/env_vars.sh"
chmod +x "$CONDA_PREFIX/etc/conda/deactivate.d/env_vars.sh"

echo "Environment setup complete!"
echo "Please edit $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh to add your actual API keys"
echo "Then run: source activate agentic_ai"
|
agents/test_runner.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import unittest
|
2 |
+
import sys
|
3 |
+
import io
|
4 |
+
import contextlib
|
5 |
+
import os
|
6 |
+
|
7 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
8 |
+
|
9 |
+
def run_tests(test_code):
|
10 |
+
"""Execute the generated test cases."""
|
11 |
+
try:
|
12 |
+
# Redirect stdout to capture test results
|
13 |
+
with io.StringIO() as buf, contextlib.redirect_stdout(buf):
|
14 |
+
# Execute the test code
|
15 |
+
exec(test_code, globals())
|
16 |
+
|
17 |
+
# Find test suite and run it
|
18 |
+
for var_name, var_value in globals().items():
|
19 |
+
if isinstance(var_value, type) and issubclass(
|
20 |
+
var_value, unittest.TestCase):
|
21 |
+
suite = unittest.TestLoader().loadTestsFromTestCase(var_value)
|
22 |
+
unittest.TextTestRunner(stream=buf, verbosity=2).run(suite)
|
23 |
+
|
24 |
+
test_output = buf.getvalue()
|
25 |
+
|
26 |
+
print(test_output)
|
27 |
+
return 0 # Return 0 to indicate successful execution
|
28 |
+
|
29 |
+
except Exception as e:
|
30 |
+
print(f"Error executing tests: {str(e)}")
|
31 |
+
return 1 # Return 1 to indicate an error
|
32 |
+
|
33 |
+
if __name__ == "__main__":
    # Test code arrives on stdin from the parent process (TestingAgent);
    # the run_tests exit code becomes this process's exit status.
    sys.exit(run_tests(sys.stdin.read()))
|
agents/testing_agent.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, Any, List
|
2 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
3 |
+
from loguru import logger
|
4 |
+
from utils import llm_orchestrator
|
5 |
+
import unittest
|
6 |
+
import io
|
7 |
+
import contextlib
|
8 |
+
import asyncio
|
9 |
+
import sys
|
10 |
+
|
11 |
+
class TestingAgent:
    """Agent that generates and executes unit tests via an LLM.

    Test generation is delegated to the LLM orchestrator; execution happens
    in a separate Python process (agents/test_runner.py) so generated code
    cannot crash or pollute this process.
    """

    def __init__(self, llm_orchestrator: LLMOrchestrator):
        """Initialize the Testing Agent.

        Args:
            llm_orchestrator: Shared orchestrator used for LLM completions.
        """
        logger.info("Initializing TestingAgent")
        self.llm_orchestrator = llm_orchestrator
        # Capabilities advertised to the coordinating hub.
        self.capabilities = [
            "test_generation",
            "test_execution",
            "test_analysis"
        ]
        self.setup_logger()

    def setup_logger(self):
        """Configure logging for the agent (rotating file sink)."""
        logger.add("logs/testing_agent.log", rotation="500 MB")

    async def generate_tests(
            self, code: str, focus_areas: List[str] = None) -> Dict[str, Any]:
        """Generate test cases for the given code.

        Args:
            code: Source code to write tests for.
            focus_areas: Optional aspects to emphasise; when omitted the
                prompt asks for 'General functionality'.

        Returns:
            ``{"status": "success", "test_code": ...}`` on success, or
            ``{"status": "error", "message": ...}`` when the LLM call fails.
        """
        logger.info(f"Generating tests for code:\n{code}")
        try:
            prompt = f"""
        Generate test cases for the following code:
        ```python
        {code}
        ```

        Focus on the following areas:
        {', '.join(focus_areas) if focus_areas else 'General functionality'}

        Provide the test cases in a format compatible with Python's unittest framework.
        """
            test_code = await self.llm_orchestrator.generate_completion(prompt)

            logger.info(f"Test cases generated:\n{test_code}")
            return {
                "status": "success",
                "test_code": test_code
            }

        except Exception as e:
            logger.error(f"Error generating tests: {str(e)}")
            return {
                "status": "error",
                "message": str(e)
            }

    async def execute_tests(self, test_code: str) -> Dict[str, Any]:
        """Execute the generated test cases in a separate process.

        Spawns ``agents/test_runner.py`` with the current interpreter and
        pipes the test code to its stdin; the runner's exit code decides
        success vs. error.

        NOTE(review): the runner path is relative, so this assumes the
        process's working directory is the project root — confirm callers.

        Returns:
            ``{"status": "success", "results": <stdout>}`` when the runner
            exits 0, otherwise ``{"status": "error", "message": <stderr>}``.
        """
        logger.info(f"Executing tests in a separate process:\n{test_code}")
        try:
            process = await asyncio.create_subprocess_exec(
                sys.executable,  # Use the same Python interpreter
                "agents/test_runner.py",
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )

            stdout, stderr = await process.communicate(input=test_code.encode())
            test_output = stdout.decode()
            error_output = stderr.decode()

            if process.returncode == 0:
                logger.info(f"Test execution completed:\n{test_output}")
                return {
                    "status": "success",
                    "results": test_output
                }
            else:
                logger.error(f"Error executing tests:\n{error_output}")
                return {
                    "status": "error",
                    "message": error_output
                }
        except Exception as e:
            logger.error(f"Error executing tests: {str(e)}")
            return {
                "status": "error",
                "message": str(e)
            }
|
agents/web_browsing_agent.py
ADDED
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import aiohttp
|
2 |
+
from bs4 import BeautifulSoup
|
3 |
+
from typing import Dict, Any, List
|
4 |
+
from loguru import logger
|
5 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
6 |
+
import asyncio
|
7 |
+
from urllib.parse import urljoin, urlparse
|
8 |
+
|
9 |
+
|
10 |
+
class WebBrowsingAgent:
    """Agent that fetches web pages, processes them with an LLM, and crawls links.

    Lifecycle: ``initialize()`` (called implicitly by ``execute``) opens a
    shared aiohttp session; ``shutdown()`` closes it.
    """

    def __init__(self, llm_api_key: str):
        """Initialize the Web Browsing Agent.

        Args:
            llm_api_key: API key forwarded to the LLM orchestrator.
        """
        logger.info("Initializing WebBrowsingAgent")
        self.llm_orchestrator = LLMOrchestrator(llm_api_key)
        self.session = None  # aiohttp.ClientSession, created lazily in initialize()
        self.setup_logger()
        # URLs already crawled; persists across crawl_links calls (never cleared).
        self.visited_urls = set()
        # Browser-like User-Agent to avoid trivial bot blocking.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Capabilities advertised to the coordinating hub.
        self.capabilities = [
            "web_browsing",
            "data_collection",
            "content_processing",
            "information_extraction",
            "link_crawling"
        ]

    def setup_logger(self):
        """Configure logging for the agent (rotating file sink)."""
        logger.add("logs/web_browsing_agent.log", rotation="500 MB")

    async def initialize(self):
        """Create the shared aiohttp session if it does not exist yet."""
        logger.info("Initializing aiohttp session")
        if not self.session:
            self.session = aiohttp.ClientSession(headers=self.headers)

    async def execute(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """Execute a web browsing task.

        Args:
            task: Must contain a 'url' key; may contain 'requirements' used
                during information extraction.

        Returns:
            ``{'status': 'success', 'data': ..., 'url': ...}`` or
            ``{'status': 'error', 'error': ..., 'url': ...}``.

        Raises:
            ValueError: when the task has no 'url' key.
        """
        logger.info(f"Executing task: {task}")
        await self.initialize()

        if 'url' not in task:
            logger.error("URL not provided in task")
            raise ValueError("URL not provided in task")

        try:
            content = await self.collect_data(task['url'])
            processed_data = await self.process_content(content, task)
            logger.info(f"Successfully executed task: {task}")
            return {
                'status': 'success',
                'data': processed_data,
                'url': task['url']
            }
        except Exception as e:
            logger.error(f"Error executing task: {str(e)}")
            return {
                'status': 'error',
                'error': str(e),
                'url': task['url']
            }

    async def collect_data(self, url: str, retries: int = 3,
                           delay: int = 1) -> Dict[str, Any]:
        """Collect data from a URL with error handling and retries.

        Retries with exponential backoff (delay * 2**attempt) on non-200
        responses, network errors, and unexpected errors alike.

        Returns:
            Dict with 'url', 'text_content', 'links' (absolutized),
            'images', 'status_code', 'headers'.

        Raises:
            Exception: when all ``retries`` attempts fail.
        """
        for attempt in range(retries):
            try:
                async with self.session.get(url) as response:
                    if response.status == 200:
                        html = await response.text()
                        soup = BeautifulSoup(html, 'html.parser')

                        # Extract various types of content
                        text_content = soup.get_text(separator=' ', strip=True)
                        links = [
                            link.get('href') for link in soup.find_all(
                                'a', href=True)]
                        images = [
                            img.get('src') for img in soup.find_all(
                                'img', src=True)]

                        # Process links to get absolute URLs
                        processed_links = [urljoin(url, link)
                                           for link in links]

                        logger.info(f"Successfully collected data from {url}")
                        return {
                            'url': url,
                            'text_content': text_content,
                            'links': processed_links,
                            'images': images,
                            'status_code': response.status,
                            'headers': dict(response.headers)
                        }
                    else:
                        logger.error(
                            f"HTTP {response.status}: Failed to fetch {url} on attempt {attempt + 1}")
                        if attempt < retries - 1:
                            # Exponential backoff
                            await asyncio.sleep(delay * (2 ** attempt))
                        else:
                            raise Exception(
                                f"HTTP {response.status}: Failed to fetch {url} after multiple retries")
            except aiohttp.ClientError as e:
                logger.error(
                    f"Network error on attempt {attempt + 1} for {url}: {str(e)}")
                if attempt < retries - 1:
                    # Exponential backoff
                    await asyncio.sleep(delay * (2 ** attempt))
                else:
                    raise Exception(
                        f"Network error: Failed to fetch {url} after multiple retries")
            # NOTE(review): aiohttp 3.x exposes HttpProcessingError under
            # aiohttp.http_exceptions, not necessarily at the top level —
            # confirm this attribute exists for the pinned aiohttp version.
            except aiohttp.HttpProcessingError as e:
                logger.error(
                    f"HTTP processing error on attempt {attempt + 1} for {url}: {str(e)}")
                if attempt < retries - 1:
                    # Exponential backoff
                    await asyncio.sleep(delay * (2 ** attempt))
                else:
                    raise Exception(
                        f"HTTP processing error: Failed to fetch {url} after multiple retries")
            except Exception as e:
                # Also catches the "after multiple retries" exceptions raised
                # above on the last attempt; they are re-raised immediately
                # below with a generic prefix since attempt == retries - 1.
                logger.error(
                    f"Unexpected error on attempt {attempt + 1} for {url}: {str(e)}")
                if attempt < retries - 1:
                    # Exponential backoff
                    await asyncio.sleep(delay * (2 ** attempt))
                else:
                    raise Exception(
                        f"Unexpected error: Failed to fetch {url} after multiple retries")

    async def process_content(
            self, content: Dict[str, Any], task: Dict[str, Any]) -> Dict[str, Any]:
        """Process collected content using the LLM.

        Summarises the first 1000 characters of page text and extracts
        task-specific information.

        Returns:
            Dict with 'summary', 'extracted_info', and 'metadata'
            (url / link count / image count).
        """
        logger.info(f"Processing content for {content['url']}")
        try:
            # Generate summary of the content (truncated to bound prompt size)
            summary = await self.llm_orchestrator.generate_completion(
                f"Summarize the following content:\n{content['text_content'][:1000]}..."
            )

            # Extract key information based on task requirements
            extracted_info = await self.extract_relevant_information(content, task)

            logger.info(f"Successfully processed content for {content['url']}")
            return {
                'summary': summary,
                'extracted_info': extracted_info,
                'metadata': {
                    'url': content['url'],
                    'num_links': len(content['links']),
                    'num_images': len(content['images'])
                }
            }
        except Exception as e:
            logger.error(f"Error processing content: {str(e)}")
            raise

    async def extract_relevant_information(
            self, content: Dict[str, Any], task: Dict[str, Any]) -> Dict[str, Any]:
        """Extract relevant information based on task requirements.

        Uses the LLM with the task's 'requirements' (defaulting to
        'general information') against the first 1500 chars of page text.
        """
        logger.info(f"Extracting relevant information for {content['url']}")
        # Use LLM to extract specific information based on task requirements
        prompt = f"""
        Extract relevant information from the following content based on these requirements:
        Task requirements: {task.get('requirements', 'general information')}

        Content:
        {content['text_content'][:1500]}...
        """

        extracted_info = await self.llm_orchestrator.generate_completion(prompt)
        logger.info(f"Successfully extracted information for {content['url']}")
        return {'extracted_information': extracted_info}

    async def crawl_links(self, base_url: str,
                          max_depth: int = 2) -> List[Dict[str, Any]]:
        """Crawl links starting from a base URL up to a maximum depth.

        Visits each page at most once (tracked in ``self.visited_urls``),
        fanning out concurrently per page via asyncio.gather. Errors on a
        single page are logged and swallowed so the crawl continues.

        Returns:
            List of collect_data result dicts, one per successfully fetched page.
        """
        logger.info(f"Crawling links from {base_url} up to depth {max_depth}")
        results = []

        async def crawl(url: str, depth: int):
            # Recursive worker: stop at depth limit or already-seen URLs.
            if depth > max_depth or url in self.visited_urls:
                return

            self.visited_urls.add(url)
            try:
                content = await self.collect_data(url)
                results.append(content)

                if depth < max_depth:
                    tasks = []
                    for link in content['links']:
                        if link not in self.visited_urls:
                            tasks.append(crawl(link, depth + 1))
                    await asyncio.gather(*tasks)
            except Exception as e:
                logger.error(f"Error crawling {url}: {str(e)}")

        await crawl(base_url, 0)
        logger.info(f"Finished crawling links from {base_url}")
        return results

    async def shutdown(self):
        """Cleanup resources: close the aiohttp session if open."""
        logger.info("Shutting down WebBrowsingAgent")
        if self.session:
            await self.session.close()
            self.session = None
|
app.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import os
|
3 |
+
import gradio as gr
|
4 |
+
from core.central_ai_hub import CentralAIHub
|
5 |
+
from loguru import logger
|
6 |
+
|
7 |
+
# Initialize the Central AI Hub
|
8 |
+
hub = None
|
9 |
+
|
10 |
+
async def initialize_hub():
    """Lazily create and start the module-level CentralAIHub singleton.

    Safe to call on every request: after the first successful call the
    global ``hub`` is reused.
    """
    global hub
    if hub is None:
        logger.info("Initializing Central AI Hub...")
        # Check if a local model path is provided as an environment variable
        model_path = os.getenv("LOCAL_MODEL_PATH")
        hub = CentralAIHub(model_path=model_path)
        await hub.start()
        logger.info("Central AI Hub initialized.")
|
19 |
+
|
20 |
+
async def process_task(task_type, task_content, task_requirements):
    """Submit a task to the hub and wait (bounded) for it to finish.

    Args:
        task_type: One of the hub's registered task types.
        task_content: Free-form task payload.
        task_requirements: Comma-separated requirement list (may be empty).

    Returns:
        A human-readable status line for the Gradio UI.
    """
    await initialize_hub()
    task = {
        'type': task_type,
        'content': task_content,
        'requirements': task_requirements.split(',') if task_requirements else []
    }
    task_id = await hub.delegate_task(task)

    # The hub processes tasks asynchronously, so poll briefly instead of
    # reporting the (always still 'active') status immediately after
    # submission. Bounded at ~15 s so the UI never hangs forever.
    status = await hub.get_task_status(task_id)
    for _ in range(30):
        if status['status'] != 'active':
            break
        await asyncio.sleep(0.5)
        status = await hub.get_task_status(task_id)

    return f"Task ID: {task_id}, Status: {status['status']}, Result: {status.get('result', 'N/A')}"
|
30 |
+
|
31 |
+
if __name__ == "__main__":
    # Minimal Gradio UI: task inputs on one row, a submit button, and a
    # textbox that shows the delegated task's id/status/result.
    with gr.Blocks() as demo:
        gr.Markdown("# Central AI Hub")
        with gr.Row():
            # Choices mirror the task types registered in CentralAIHub.agents.
            task_type = gr.Dropdown(choices=['code_analysis', 'code_generation', 'error_fixing'], label="Task Type")
            task_content = gr.Textbox(label="Task Content")
            task_requirements = gr.Textbox(label="Task Requirements (comma separated)")
        submit_button = gr.Button("Submit Task")
        output_text = gr.Textbox(label="Task Status")

        # Gradio awaits async handlers (process_task) natively.
        submit_button.click(
            process_task,
            inputs=[task_type, task_content, task_requirements],
            outputs=[output_text]
        )

    # Bind to all interfaces on port 7860 (the Hugging Face Spaces default).
    demo.launch(server_name="0.0.0.0", server_port=7860)
|
core/__pycache__/central_ai_hub.cpython-311.pyc
ADDED
Binary file (8.12 kB). View file
|
|
core/__pycache__/central_ai_hub.cpython-312.pyc
ADDED
Binary file (7.87 kB). View file
|
|
core/__pycache__/central_ai_hub.cpython-39.pyc
ADDED
Binary file (10.8 kB). View file
|
|
core/central_ai_hub.py
ADDED
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Central AI Hub for coordinating all AI agents and operations."""
|
2 |
+
import os
|
3 |
+
import uuid
|
4 |
+
import asyncio
|
5 |
+
from loguru import logger
|
6 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
7 |
+
from ctransformers import AutoModelForCausalLM
|
8 |
+
import torch
|
9 |
+
from huggingface_hub import hf_hub_download
|
10 |
+
|
11 |
+
class CentralAIHub:
    """Coordinates agents, delegates tasks, and owns the shared LLM client."""

    def __init__(self, api_key=None, model_path=None):
        """Initialize the Central AI Hub.

        Args:
            api_key: Optional API key for remote LLM back-ends (unused when
                a local model is loaded).
            model_path: Optional path to a local GGUF model; when omitted a
                default model is downloaded from the Hugging Face Hub.
        """
        self.api_key = api_key
        self.model_path = model_path
        self.cache_dir = os.path.join(os.getcwd(), ".cache")
        os.makedirs(self.cache_dir, exist_ok=True)
        self.llm = None
        self.llm_orchestrator = None
        # Registered task types -> agent class names.
        self.agents = {
            'code_analysis': 'CodeAnalysisAgent',
            'code_generation': 'CodeGenerationAgent',
            'error_fixing': 'ErrorFixingAgent'
        }
        self.tasks = {}
        self.active_agents = {}
        self._initialized = False
        self.max_retries = 3   # LLM-init attempts before giving up
        self.retry_delay = 2   # seconds between attempts

    async def _initialize_llm_client(self):
        """Initialize the LLM client with a retry mechanism.

        Retries up to ``self.max_retries`` times with ``self.retry_delay``
        seconds between attempts (these attributes existed before but were
        never used — the docstring promised retries that didn't happen).

        Returns:
            True on success, False when every attempt failed.
        """
        for attempt in range(1, self.max_retries + 1):
            try:
                if self.model_path:
                    # Load local model
                    logger.info(f"Loading local model from {self.model_path}")
                    self.llm = AutoModelForCausalLM.from_pretrained(
                        self.model_path,
                        model_type="qwen",
                        cache_dir=self.cache_dir,
                        local_files_only=True
                    )
                    logger.info("Local model loaded successfully")
                else:
                    # Download model from Hugging Face Hub
                    model_name = "Qwen/Qwen2.5-14B-Instruct-GGUF"
                    model_filename = "Qwen2.5-14B_Uncensored_Instruct-Q8_0.gguf"
                    # hf_hub_download returns the real cached path — it nests
                    # files under a repo-specific directory inside cache_dir,
                    # so os.path.join(cache_dir, filename) is NOT where the
                    # file lands. It is also a no-op when already cached, so
                    # no manual existence check is needed.
                    logger.info(f"Resolving model {model_filename} from Hugging Face Hub")
                    cached_model_path = hf_hub_download(
                        repo_id=model_name,
                        filename=model_filename,
                        cache_dir=self.cache_dir,
                        local_files_only=False
                    )
                    logger.info(f"Using model at {cached_model_path}")
                    self.llm = AutoModelForCausalLM.from_pretrained(
                        cached_model_path,
                        model_type="qwen",
                        local_files_only=True
                    )
                    logger.info("Model loaded successfully")
                self.llm_orchestrator = LLMOrchestrator(self.llm)
                return True
            except Exception as e:
                logger.error(
                    f"Failed to initialize LLM client (attempt {attempt}/{self.max_retries}): {e}")
                if attempt < self.max_retries:
                    await asyncio.sleep(self.retry_delay)
        return False

    async def start(self):
        """Start the hub; initialize agents only after the LLM client is up."""
        if self._initialized:
            return

        logger.info("Starting Central AI Hub...")

        if not await self._initialize_llm_client():  # Initialize LLM client first
            raise Exception("Failed to initialize LLM client.")

        for agent_type, agent_class in self.agents.items():
            try:
                await self.initialize_agent(agent_class)
                logger.info(f"Initialized {agent_class}")
            except Exception as e:
                logger.error(f"Failed to initialize agent {agent_class}: {e}")
                raise  # Re-raise the exception to halt the startup

        self._initialized = True
        logger.info("Central AI Hub initialization complete.")

    async def delegate_task(self, task):
        """Delegate a task to the appropriate agent.

        Returns:
            The new task's UUID string; callers poll get_task_status with it.

        Raises:
            Exception: when task is falsy or no agent handles its type.
        """
        if not task:
            raise Exception("Task cannot be None")

        task_id = str(uuid.uuid4())
        agent_type = await self.select_agent(task)

        if not agent_type:
            raise Exception(f"No suitable agent found for task type: {task['type']}")

        self.tasks[task_id] = {
            'status': 'active',
            'task': task,
            'agent': agent_type,
            'result': None
        }

        # Process task asynchronously in the background.
        asyncio.create_task(self._process_task(task_id))

        return task_id

    async def _process_task(self, task_id):
        """Process a task asynchronously (currently simulated work)."""
        task_info = self.tasks[task_id]
        try:
            await asyncio.sleep(2)  # Simulated work
            task_info['status'] = 'completed'
            task_info['result'] = "Task processed successfully"
            logger.info(f"Task {task_id} completed")
        except Exception as e:
            task_info['status'] = 'failed'
            task_info['error'] = str(e)
            logger.error(f"Error processing task {task_id}: {str(e)}")

    async def get_task_status(self, task_id):
        """Return the stored task record, or ``{'status': 'not_found'}``."""
        return self.tasks.get(task_id, {'status': 'not_found'})

    async def select_agent(self, task):
        """Select the agent class registered for the task's type, if any."""
        return self.agents.get(task['type'])

    async def initialize_agent(self, agent_id):
        """Mark an agent as active.

        Raises:
            ValueError: when agent_id is not a registered agent class name.
        """
        if agent_id not in self.agents.values():
            raise ValueError(f"Agent {agent_id} not found")
        self.active_agents[agent_id] = True

    async def shutdown(self):
        """Shutdown the Central AI Hub, cancelling any still-active tasks."""
        logger.info("Shutting down Central AI Hub...")
        # Clean up active agents
        self.active_agents.clear()
        # Cancel any pending tasks (status only; background coroutines are
        # owned by the event loop and end on their own)
        for task_id, task in self.tasks.items():
            if task['status'] == 'active':
                task['status'] = 'cancelled'
|
core/knowledge_management_layer.py
ADDED
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, List, Any
|
2 |
+
import networkx as nx
|
3 |
+
from rdflib import Graph, Literal, RDF, URIRef
|
4 |
+
from loguru import logger
|
5 |
+
from utils.llm_orchestrator import LLMOrchestrator
|
6 |
+
import json
|
7 |
+
from datetime import datetime
|
8 |
+
|
9 |
+
|
10 |
+
class KnowledgeManagementLayer:
|
11 |
+
def __init__(self, llm_api_key: str):
    """Initialize the Knowledge Management Layer.

    Args:
        llm_api_key: API key forwarded to the LLM orchestrator.
    """
    self.llm_orchestrator = LLMOrchestrator(llm_api_key)
    # Property graph for in-memory traversal and analysis.
    self.knowledge_graph = nx.DiGraph()
    # Parallel RDF representation, used for SPARQL queries.
    self.rdf_graph = Graph()
    self.setup_logger()
|
17 |
+
|
18 |
+
def setup_logger(self):
    """Configure logging for the knowledge management layer (rotating file sink)."""
    logger.add("logs/knowledge_management.log", rotation="500 MB")
|
21 |
+
|
22 |
+
async def update_knowledge_graph(
        self, new_info: Dict[str, Any]) -> Dict[str, str]:
    """Update the knowledge graph with new information.

    The raw payload is first run through the LLM to extract entities and
    relations, which are inserted into the networkx graph and mirrored
    into the RDF graph.

    Returns:
        ``{'status': 'success'|'error', 'message': ...}``; the error log
        records how far processing got before failing.
    """
    logger.info("Updating knowledge graph with new information")
    entities_added = 0
    relations_added = 0
    try:
        # Process new information using LLM
        processed_info = await self.process_information(new_info)

        # Add nodes and edges to the graph
        for entity in processed_info['entities']:
            self.knowledge_graph.add_node(
                entity['id'],
                **entity['attributes']
            )
            entities_added += 1

        for relation in processed_info['relations']:
            self.knowledge_graph.add_edge(
                relation['source'],
                relation['target'],
                **relation['attributes']
            )
            relations_added += 1

        # Keep the RDF representation in sync with the property graph.
        await self.update_rdf_graph(processed_info)

        logger.info(
            f"Successfully updated knowledge graph: Added {entities_added} entities and {relations_added} relations")
        return {
            'status': 'success',
            'message': f"Added {entities_added} entities and {relations_added} relations"
        }
    except Exception as e:
        logger.error(f"Error updating knowledge graph: {str(e)}")
        logger.error(
            f"Processed {entities_added} entities and {relations_added} relations before error")
        return {
            'status': 'error',
            'message': str(e)
        }
|
65 |
+
|
66 |
+
async def process_information(
        self, info: Dict[str, Any]) -> Dict[str, Any]:
    """Process raw information using LLM to extract entities and relations.

    Runs two LLM passes: entity extraction first, then relation extraction
    with the found entities included in the prompt.

    Returns:
        Dict with 'entities' and 'relations' lists, as produced by
        ``parse_llm_response`` (defined elsewhere in this class).

    Raises:
        Exception: re-raised after logging when an LLM call or parsing fails.
    """
    logger.info("Processing information to extract entities and relations")
    try:
        # Generate prompt for entity extraction
        entity_prompt = f"""
        Extract entities and their attributes from the following information:
        {json.dumps(info, indent=2)}

        Return the entities in the following format:
        - Entity ID
        - Entity Type
        - Attributes (key-value pairs)
        """

        entity_response = await self.llm_orchestrator.generate_completion(entity_prompt)
        entities = self.parse_llm_response(entity_response, 'entities')
        logger.info(f"Extracted {len(entities)} entities")

        # Generate prompt for relation extraction
        relation_prompt = f"""
        Extract relations between entities from the following information:
        {json.dumps(info, indent=2)}

        Entities found:
        {json.dumps(entities, indent=2)}

        Return the relations in the following format:
        - Source Entity ID
        - Target Entity ID
        - Relation Type
        - Attributes (key-value pairs)
        """

        relation_response = await self.llm_orchestrator.generate_completion(relation_prompt)
        relations = self.parse_llm_response(relation_response, 'relations')
        logger.info(f"Extracted {len(relations)} relations")

        return {
            'entities': entities,
            'relations': relations
        }
    except Exception as e:
        logger.error(f"Error processing information: {str(e)}")
        raise
|
112 |
+
|
113 |
+
async def update_rdf_graph(self, processed_info: Dict[str, Any]):
    """Update the RDF graph with processed entities and relations.

    Entities become ``entity:<id>`` subjects typed via RDF.type; attributes
    are attached as ``attribute:<key>`` literals.

    Raises:
        Exception: re-raised after logging on any rdflib failure.
    """
    try:
        for entity in processed_info['entities']:
            subject = URIRef(f"entity:{entity['id']}")
            self.rdf_graph.add(
                (subject, RDF.type, URIRef(f"type:{entity['type']}")))

            for key, value in entity['attributes'].items():
                self.rdf_graph.add(
                    (subject, URIRef(f"attribute:{key}"), Literal(value)))

        for relation in processed_info['relations']:
            subject = URIRef(f"entity:{relation['source']}")
            obj = URIRef(f"entity:{relation['target']}")
            predicate = URIRef(f"relation:{relation['type']}")
            self.rdf_graph.add((subject, predicate, obj))

            # Relation attributes hang off the predicate URI, so they are
            # shared by every triple that uses the same relation type.
            for key, value in relation['attributes'].items():
                self.rdf_graph.add(
                    (predicate, URIRef(f"attribute:{key}"), Literal(value)))
    except Exception as e:
        logger.error(f"Error updating RDF graph: {str(e)}")
        raise
|
137 |
+
|
138 |
+
async def query_knowledge(self, query: Dict[str, Any]) -> Dict[str, Any]:
    """Query the knowledge graph based on specific criteria.

    The search criteria are turned into a SPARQL query by the LLM and run
    against the RDF graph.

    Returns:
        ``{'status': 'success', 'results': ...}`` or
        ``{'status': 'error', 'message': ...}``.
    """
    try:
        # Generate SPARQL query using LLM
        sparql_prompt = f"""
        Generate a SPARQL query for the following search criteria:
        {json.dumps(query, indent=2)}
        """

        sparql_query = await self.llm_orchestrator.generate_completion(sparql_prompt)

        # Execute query on RDF graph
        # NOTE(review): the LLM output is executed verbatim; a malformed or
        # prose-wrapped response will raise inside rdflib's SPARQL parser
        # and surface here as a generic error status.
        results = self.rdf_graph.query(sparql_query)

        # Process and format results
        formatted_results = await self.format_query_results(results)

        return {
            'status': 'success',
            'results': formatted_results
        }
    except Exception as e:
        logger.error(f"Error querying knowledge graph: {str(e)}")
        return {
            'status': 'error',
            'message': str(e)
        }
|
165 |
+
|
166 |
+
async def generate_insights(
        self, context: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Derive insights from the knowledge graph for the given context.

    A context-relevant subgraph is summarized and handed to the LLM;
    the completion is then parsed into structured insight records.

    Args:
        context: Criteria describing which part of the graph matters.

    Raises:
        Exception: Propagated after logging if any step fails.
    """
    try:
        # Narrow the graph down to what the context cares about.
        subgraph = self.extract_relevant_subgraph(context)

        insight_prompt = f"""
        Generate insights from the following knowledge graph data:
        Nodes: {len(subgraph.nodes)}
        Edges: {len(subgraph.edges)}
        Context: {json.dumps(context, indent=2)}

        Graph Summary:
        {self.summarize_subgraph(subgraph)}
        """

        completion = await self.llm_orchestrator.generate_completion(insight_prompt)

        return self.parse_llm_response(completion, 'insights')
    except Exception as e:
        logger.error(f"Error generating insights: {str(e)}")
        raise
|
190 |
+
|
191 |
+
def extract_relevant_subgraph(self, context: Dict[str, Any]) -> "nx.DiGraph":
    """Return the portion of the knowledge graph relevant to *context*.

    NOTE(review): currently a stub — context-based filtering is not yet
    implemented, so the full graph is returned unchanged.
    """
    return self.knowledge_graph
|
196 |
+
|
197 |
+
def summarize_subgraph(self, subgraph: "nx.DiGraph") -> str:
    """Build a JSON summary of *subgraph*.

    The summary counts nodes and edges by their ``'type'`` attribute
    (missing types count as ``'unknown'``) and lists the five
    highest-degree nodes as key entities.

    Args:
        subgraph: Directed graph to summarize.

    Returns:
        A pretty-printed JSON string with ``node_types``, ``edge_types``
        and ``key_entities`` sections.
    """
    node_types: Dict[str, int] = {}
    edge_types: Dict[str, int] = {}

    # Tally nodes by their 'type' attribute.
    for _, attrs in subgraph.nodes(data=True):
        kind = attrs.get('type', 'unknown')
        node_types[kind] = node_types.get(kind, 0) + 1

    # Tally edges the same way.
    for _, _, attrs in subgraph.edges(data=True):
        kind = attrs.get('type', 'unknown')
        edge_types[kind] = edge_types.get(kind, 0) + 1

    # The five best-connected nodes serve as "key entities".
    ranked = sorted(subgraph.degree, key=lambda pair: pair[1], reverse=True)
    key_entities = [{'id': node_id, 'degree': deg}
                    for node_id, deg in ranked[:5]]

    summary = {
        'node_types': node_types,
        'edge_types': edge_types,
        'key_entities': key_entities,
    }
    return json.dumps(summary, indent=2)
|
226 |
+
|
227 |
+
@staticmethod
|
228 |
+
def parse_llm_response(
|
229 |
+
response: str, response_type: str) -> List[Dict[str, Any]]:
|
230 |
+
"""Parse LLM response into structured data."""
|
231 |
+
# Implementation would include logic to parse the LLM's response
|
232 |
+
# into a structured format based on the response_type
|
233 |
+
return [] # Placeholder return
|
234 |
+
|
235 |
+
async def backup_knowledge(self, backup_path: str):
    """Backup the knowledge graph to timestamped files.

    Writes two snapshot files under ``backup_path``: a pickle of the
    NetworkX graph and a Turtle serialization of the RDF graph.

    Args:
        backup_path: Directory for the snapshot files (must exist).

    Raises:
        Exception: Re-raised after logging if either backup step fails.
    """
    import pickle  # local import: only needed for backups

    try:
        # NOTE(review): utcnow() is deprecated since Python 3.12; kept
        # for now to preserve the existing naive-UTC timestamp format.
        timestamp = datetime.utcnow().strftime('%Y%m%d_%H%M%S')

        # Backup NetworkX graph.  nx.write_gpickle() was removed in
        # networkx 3.0, so serialize with the pickle module directly
        # (write_gpickle was a thin wrapper around pickle anyway).
        with open(f"{backup_path}/knowledge_graph_{timestamp}.gpickle",
                  "wb") as f:
            pickle.dump(self.knowledge_graph, f, pickle.HIGHEST_PROTOCOL)

        # Backup RDF graph as Turtle.
        self.rdf_graph.serialize(
            f"{backup_path}/rdf_graph_{timestamp}.ttl",
            format="turtle")

        logger.info(
            f"Knowledge graph backed up successfully at {timestamp}")
    except Exception as e:
        logger.error(f"Error backing up knowledge graph: {str(e)}")
        raise
|
logs/code_analysis_agent.log
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
2025-01-11 22:23:20.639 | INFO | agents.code_analysis_agent:analyze_repository:56 - Analyzing repository at .
|
2 |
+
2025-01-11 22:23:20.640 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\temp_analysis_script.py
|
3 |
+
2025-01-11 22:23:20.642 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\central_ai_hub_test.py
|
4 |
+
2025-01-11 22:23:20.643 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\code_analysis_agent.py
|
5 |
+
2025-01-11 22:23:20.647 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\code_generation_agent.py
|
6 |
+
2025-01-11 22:23:20.647 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\error_fixing_agent.py
|
7 |
+
2025-01-11 22:23:20.647 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\planning_agent.py
|
8 |
+
2025-01-11 22:23:20.655 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\q_learning_agent.py
|
9 |
+
2025-01-11 22:23:20.657 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\reasoning_agent.py
|
10 |
+
2025-01-11 22:23:20.660 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\testing_agent.py
|
11 |
+
2025-01-11 22:23:20.664 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\test_runner.py
|
12 |
+
2025-01-11 22:23:20.666 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\web_browsing_agent.py
|
13 |
+
2025-01-11 22:23:20.675 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\core\central_ai_hub.py
|
14 |
+
2025-01-11 22:23:20.681 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\core\knowledge_management_layer.py
|
15 |
+
2025-01-11 22:23:20.690 | INFO | agents.code_analysis_agent:analyze_repository:65 - Finished analyzing repository at .
|
16 |
+
2025-01-11 22:23:34.023 | INFO | agents.code_analysis_agent:analyze_repository:56 - Analyzing repository at .
|
17 |
+
2025-01-11 22:23:34.023 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\temp_analysis_script.py
|
18 |
+
2025-01-11 22:23:34.023 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\central_ai_hub_test.py
|
19 |
+
2025-01-11 22:23:34.023 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\code_analysis_agent.py
|
20 |
+
2025-01-11 22:23:34.039 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\code_generation_agent.py
|
21 |
+
2025-01-11 22:23:34.040 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\error_fixing_agent.py
|
22 |
+
2025-01-11 22:23:34.043 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\planning_agent.py
|
23 |
+
2025-01-11 22:23:34.044 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\q_learning_agent.py
|
24 |
+
2025-01-11 22:23:34.049 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\reasoning_agent.py
|
25 |
+
2025-01-11 22:23:34.052 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\testing_agent.py
|
26 |
+
2025-01-11 22:23:34.054 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\test_runner.py
|
27 |
+
2025-01-11 22:23:34.056 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\agents\web_browsing_agent.py
|
28 |
+
2025-01-11 22:23:34.062 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\core\central_ai_hub.py
|
29 |
+
2025-01-11 22:23:34.066 | INFO | agents.code_analysis_agent:analyze_file:90 - Analyzing file .\core\knowledge_management_layer.py
|
30 |
+
2025-01-11 22:23:34.068 | INFO | agents.code_analysis_agent:analyze_repository:65 - Finished analyzing repository at .
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
huggingface_hub
|
2 |
+
transformers
|
3 |
+
torch
|
4 |
+
ctransformers
|
temp_analysis_script.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
import json

from agents.code_analysis_agent import CodeAnalysisAgent


async def main():
    """Analyze the current repository and pretty-print the JSON report."""
    agent = CodeAnalysisAgent()
    analysis_report = await agent.analyze_repository('.')
    print(json.dumps(analysis_report, indent=4))


if __name__ == "__main__":
    asyncio.run(main())
|