Spaces:
Runtime error
Runtime error
Cascade Bot
committed on
Commit
·
1d75522
0
Parent(s):
Added Groq streaming support and optimizations - clean version
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- .github/workflows/sync-to-space.yml +17 -0
- .gitignore +91 -0
- Dockerfile +52 -0
- README.md +113 -0
- agentic_system.py +558 -0
- api/__init__.py +6 -0
- api/openai_compatible.py +139 -0
- api/venture_api.py +194 -0
- app.py +633 -0
- app.yaml +9 -0
- app_space.sh +34 -0
- check_space_status.py +55 -0
- check_versions.py +50 -0
- cleanup.sh +45 -0
- config.py +452 -0
- download_models.py +76 -0
- download_models_space.py +32 -0
- fix_indentation.patch +42 -0
- init_space.sh +28 -0
- main.py +133 -0
- meta_learning.py +436 -0
- multimodal_reasoning.py +301 -0
- orchestrator.py +628 -0
- reasoning.py +0 -0
- reasoning/__init__.py +70 -0
- reasoning/agentic.py +345 -0
- reasoning/analogical.py +611 -0
- reasoning/base.py +17 -0
- reasoning/bayesian.py +325 -0
- reasoning/chain_of_thought.py +415 -0
- reasoning/coordination.py +525 -0
- reasoning/emergent.py +133 -0
- reasoning/groq_strategy.py +332 -0
- reasoning/learning.py +394 -0
- reasoning/local_llm.py +117 -0
- reasoning/market_analysis.py +450 -0
- reasoning/meta_learning.py +339 -0
- reasoning/model_manager.py +145 -0
- reasoning/monetization.py +447 -0
- reasoning/multimodal.py +305 -0
- reasoning/neurosymbolic.py +316 -0
- reasoning/portfolio_optimization.py +549 -0
- reasoning/quantum.py +372 -0
- reasoning/recursive.py +576 -0
- reasoning/specialized.py +476 -0
- reasoning/tree_of_thoughts.py +516 -0
- reasoning/unified_engine.py +707 -0
- reasoning/venture_strategies.py +701 -0
- reasoning/venture_types.py +332 -0
- requirements.txt +35 -0
.github/workflows/sync-to-space.yml
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Sync to Hugging Face Space
on:
  push:
    branches: [main]

jobs:
  sync-to-space:
    runs-on: ubuntu-latest
    steps:
      # checkout@v2 runs on the deprecated Node 12 runtime; v4 is the
      # currently supported release and is a drop-in replacement here.
      - uses: actions/checkout@v4
        with:
          # Full history is required so the push below is not a shallow push.
          fetch-depth: 0
      - name: Push to Space
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          git push https://USER:[email protected]/spaces/USER/SPACE_NAME main
|
.gitignore
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
*.so
|
6 |
+
.Python
|
7 |
+
build/
|
8 |
+
develop-eggs/
|
9 |
+
dist/
|
10 |
+
downloads/
|
11 |
+
eggs/
|
12 |
+
.eggs/
|
13 |
+
lib/
|
14 |
+
lib64/
|
15 |
+
parts/
|
16 |
+
sdist/
|
17 |
+
var/
|
18 |
+
wheels/
|
19 |
+
*.egg-info/
|
20 |
+
.installed.cfg
|
21 |
+
*.egg
|
22 |
+
|
23 |
+
# Virtual Environment
|
24 |
+
venv/
|
25 |
+
env/
|
26 |
+
ENV/
|
27 |
+
|
28 |
+
# IDE
|
29 |
+
.idea/
|
30 |
+
.vscode/
|
31 |
+
*.swp
|
32 |
+
*.swo
|
33 |
+
|
34 |
+
# Logs
|
35 |
+
*.log
|
36 |
+
logs/
|
37 |
+
log/
|
38 |
+
|
39 |
+
# Local development
|
40 |
+
.env
|
41 |
+
.env.local
|
42 |
+
.env.*.local
|
43 |
+
|
44 |
+
# Data
|
45 |
+
data/
|
46 |
+
*.db
|
47 |
+
*.sqlite3
|
48 |
+
|
49 |
+
# Model files
|
50 |
+
*.pt
|
51 |
+
*.pth
|
52 |
+
*.ckpt
|
53 |
+
*.bin
|
54 |
+
*.onnx
|
55 |
+
|
56 |
+
# Temporary files
|
57 |
+
.DS_Store
|
58 |
+
Thumbs.db
|
59 |
+
*.tmp
|
60 |
+
*.bak
|
61 |
+
*.swp
|
62 |
+
*~
|
63 |
+
|
64 |
+
# Distribution
|
65 |
+
dist/
|
66 |
+
build/
|
67 |
+
*.egg-info/
|
68 |
+
|
69 |
+
# Documentation
|
70 |
+
docs/_build/
|
71 |
+
site/
|
72 |
+
|
73 |
+
# Testing
|
74 |
+
.coverage
|
75 |
+
htmlcov/
|
76 |
+
.pytest_cache/
|
77 |
+
.tox/
|
78 |
+
nosetests.xml
|
79 |
+
coverage.xml
|
80 |
+
*.cover
|
81 |
+
.hypothesis/
|
82 |
+
|
83 |
+
# Jupyter Notebook
|
84 |
+
.ipynb_checkpoints
|
85 |
+
*.ipynb
|
86 |
+
|
87 |
+
# Project specific
|
88 |
+
outputs/
|
89 |
+
results/
|
90 |
+
experiments/
|
91 |
+
checkpoints/
|
Dockerfile
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM python:3.10-slim

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    DEBIAN_FRONTEND=noninteractive \
    REQUESTS_TIMEOUT=30 \
    PYTHONPATH=/app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    git \
    dnsutils \
    iputils-ping \
    && rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /app

# NOTE(review): /etc/resolv.conf is managed by the container runtime and is
# bind-mounted fresh when a container starts, so these entries only affect
# DNS during the remainder of the image build — confirm this layer is needed.
RUN echo "nameserver 8.8.8.8" > /etc/resolv.conf && \
    echo "nameserver 8.8.4.4" >> /etc/resolv.conf && \
    echo "nameserver 1.1.1.1" >> /etc/resolv.conf && \
    echo "options timeout:1 attempts:5" >> /etc/resolv.conf

# Copy requirements first to leverage Docker cache
COPY requirements.txt .

# Install Python dependencies with retry mechanism and explicit Gradio upgrade
RUN pip install --no-cache-dir -r requirements.txt || \
    (sleep 5 && pip install --no-cache-dir -r requirements.txt) || \
    (sleep 10 && pip install --no-cache-dir -r requirements.txt) && \
    pip install --no-cache-dir gradio==4.44.1

# Copy application code
COPY . .

# Expose port
EXPOSE 7860

# Network verification script. printf interprets \n portably; the original
# `echo '...\n...'` only works when the shell's echo honors backslash
# escapes (dash does, bash's builtin echo does not by default).
RUN printf '#!/bin/sh\nping -c 1 huggingface.co || ping -c 1 8.8.8.8\n' \
    > /healthcheck.sh && chmod +x /healthcheck.sh

# Healthcheck with more lenient settings
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=5 \
    CMD /healthcheck.sh || exit 1

# Command to run the application
CMD ["python", "app.py"]
|
README.md
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Advanced Reasoning System 🤖
|
3 |
+
emoji: 🤖
|
4 |
+
colorFrom: blue
|
5 |
+
colorTo: purple
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 4.16.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: mit
|
11 |
+
---
|
12 |
+
|
13 |
+
# Advanced Reasoning System 🤖
|
14 |
+
|
15 |
+
A sophisticated multi-agent reasoning system that combines multiple strategies for advanced problem-solving.
|
16 |
+
|
17 |
+
## Features
|
18 |
+
|
19 |
+
- 🧠 Multiple Reasoning Strategies:
|
20 |
+
- Chain of Thought
|
21 |
+
- Tree of Thoughts
|
22 |
+
- Meta Learning
|
23 |
+
- Local LLM Integration
|
24 |
+
- Recursive Reasoning
|
25 |
+
- Analogical Reasoning
|
26 |
+
|
27 |
+
- 🤝 Multi-Agent System:
|
28 |
+
- Dynamic Team Formation
|
29 |
+
- Cross-Team Collaboration
|
30 |
+
- Resource Management
|
31 |
+
- Task Orchestration
|
32 |
+
|
33 |
+
- 🔄 Adaptive Learning:
|
34 |
+
- Performance Tracking
|
35 |
+
- Strategy Weight Adjustment
|
36 |
+
- Pattern Recognition
|
37 |
+
- Meta-Learning Integration
|
38 |
+
|
39 |
+
## Quick Start
|
40 |
+
|
41 |
+
1. **Environment Setup**:
|
42 |
+
```bash
|
43 |
+
python -m venv venv
|
44 |
+
source venv/bin/activate # On Windows: .\venv\Scripts\activate
|
45 |
+
pip install -r requirements.txt
|
46 |
+
```
|
47 |
+
|
48 |
+
2. **Configuration**:
|
49 |
+
```bash
|
50 |
+
cp .env.example .env
|
51 |
+
# Edit .env with your settings
|
52 |
+
```
|
53 |
+
|
54 |
+
3. **Run the Application**:
|
55 |
+
```bash
|
56 |
+
python app.py
|
57 |
+
```
|
58 |
+
|
59 |
+
## Docker Support
|
60 |
+
|
61 |
+
Build and run with Docker:
|
62 |
+
|
63 |
+
```bash
|
64 |
+
docker build -t advanced-reasoning .
|
65 |
+
docker run -p 7860:7860 advanced-reasoning
|
66 |
+
```
|
67 |
+
|
68 |
+
## API Endpoints
|
69 |
+
|
70 |
+
- `/`: Main interface
|
71 |
+
- `/health`: Health check endpoint
|
72 |
+
- `/api/process_query`: Process queries via API
|
73 |
+
|
74 |
+
## Components
|
75 |
+
|
76 |
+
1. **Reasoning Engine**:
|
77 |
+
- Unified reasoning combining multiple strategies
|
78 |
+
- Dynamic strategy selection
|
79 |
+
- Result synthesis
|
80 |
+
|
81 |
+
2. **Team Management**:
|
82 |
+
- Specialized teams (Coders, Business, Research, Trading)
|
83 |
+
- Cross-team collaboration
|
84 |
+
- Resource sharing
|
85 |
+
|
86 |
+
3. **Orchestration**:
|
87 |
+
- Task planning and decomposition
|
88 |
+
- Resource allocation
|
89 |
+
- Performance monitoring
|
90 |
+
|
91 |
+
## Contributing
|
92 |
+
|
93 |
+
1. Fork the repository
|
94 |
+
2. Create your feature branch
|
95 |
+
3. Commit your changes
|
96 |
+
4. Push to the branch
|
97 |
+
5. Create a Pull Request
|
98 |
+
|
99 |
+
## License
|
100 |
+
|
101 |
+
MIT License - see LICENSE file for details
|
102 |
+
|
103 |
+
## Files
|
104 |
+
- `app.py`: Main application with Gradio interface and API integration
|
105 |
+
- `requirements.txt`: Project dependencies
|
106 |
+
- `.env.example`: Example environment variables (for reference)
|
107 |
+
|
108 |
+
## Dependencies
|
109 |
+
- gradio==4.16.0
|
110 |
+
- requests==2.31.0
|
111 |
+
|
112 |
+
---
|
113 |
+
Created with ❤️ using Gradio and Hugging Face
|
agentic_system.py
ADDED
@@ -0,0 +1,558 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Advanced Agentic System
|
3 |
+
----------------------
|
4 |
+
A sophisticated multi-agent system with:
|
5 |
+
|
6 |
+
Core Components:
|
7 |
+
1. Agent Management
|
8 |
+
2. Task Execution
|
9 |
+
3. Learning & Adaptation
|
10 |
+
4. Communication
|
11 |
+
5. Resource Management
|
12 |
+
|
13 |
+
Advanced Features:
|
14 |
+
1. Self-Improvement
|
15 |
+
2. Multi-Agent Coordination
|
16 |
+
3. Dynamic Role Assignment
|
17 |
+
4. Emergent Behavior
|
18 |
+
"""
|
19 |
+
|
20 |
+
import logging
|
21 |
+
from typing import Dict, Any, List, Optional, Union, TypeVar
|
22 |
+
from dataclasses import dataclass, field
|
23 |
+
from enum import Enum
|
24 |
+
import json
|
25 |
+
import asyncio
|
26 |
+
from datetime import datetime
|
27 |
+
import uuid
|
28 |
+
from concurrent.futures import ThreadPoolExecutor
|
29 |
+
import numpy as np
|
30 |
+
from collections import defaultdict
|
31 |
+
|
32 |
+
from orchestrator import (
|
33 |
+
AgentOrchestrator,
|
34 |
+
AgentRole,
|
35 |
+
AgentState,
|
36 |
+
TaskPriority,
|
37 |
+
Task
|
38 |
+
)
|
39 |
+
from reasoning import UnifiedReasoningEngine as ReasoningEngine, StrategyType as ReasoningMode
|
40 |
+
from reasoning.meta_learning import MetaLearningStrategy
|
41 |
+
|
42 |
+
class AgentCapability(Enum):
    """Enumeration of the things an agent can do.

    The string values are what gets sent to the orchestrator when an
    agent registers (``[c.value for c in capabilities]``).
    """

    REASONING = "reasoning"
    LEARNING = "learning"
    EXECUTION = "execution"
    COORDINATION = "coordination"
    MONITORING = "monitoring"
50 |
+
class AgentPersonality(Enum):
    """Behavioral styles an agent can be configured with.

    Stored on :class:`AgentProfile`; the string values allow round-tripping
    via ``AgentPersonality(value)``.
    """

    ANALYTICAL = "analytical"
    CREATIVE = "creative"
    CAUTIOUS = "cautious"
    PROACTIVE = "proactive"
    ADAPTIVE = "adaptive"
58 |
+
@dataclass
class AgentProfile:
    """Static description of one agent: identity, role and tuning knobs."""

    id: str  # unique identifier (a uuid4 string in AgenticSystem.create_agent)
    name: str  # human-readable label
    role: AgentRole  # role used when registering with the orchestrator
    capabilities: List[AgentCapability]  # what this agent is able to do
    personality: AgentPersonality  # behavioral style
    expertise_areas: List[str]  # domains seeded into the knowledge base
    learning_rate: float  # adaptation speed (0.1 in create_agent)
    risk_tolerance: float  # presumably 0..1; 0.5 default in create_agent
    created_at: datetime  # creation timestamp
    metadata: Dict[str, Any]  # free-form extras
72 |
+
class Agent:
    """Advanced autonomous agent with learning capabilities.

    Combines a shared reasoning engine and meta-learning strategy with
    per-agent state: task history, a knowledge base, rolling performance
    metrics and resource accounting.
    """

    def __init__(
        self,
        profile: AgentProfile,
        reasoning_engine: ReasoningEngine,
        meta_learning: MetaLearningStrategy,
        config: Optional[Dict[str, Any]] = None  # was `Dict[str, Any] = None`: annotation did not admit the None default
    ):
        """Create an agent from its profile and the shared engines.

        Args:
            profile: Static description of this agent.
            reasoning_engine: Engine used for analysis and planning.
            meta_learning: Strategy used to learn from executions.
            config: Optional per-agent settings; defaults to an empty dict.
        """
        self.profile = profile
        self.reasoning_engine = reasoning_engine
        self.meta_learning = meta_learning
        self.config = config or {}

        # State management
        self.state = AgentState.IDLE
        self.current_task: Optional[Task] = None
        self.task_history: List[Task] = []

        # Learning and adaptation
        self.knowledge_base: Dict[str, Any] = {}
        self.learned_patterns: List[Dict[str, Any]] = []
        self.adaptation_history: List[Dict[str, Any]] = []

        # Performance metrics (per-metric sample series)
        self.metrics: Dict[str, List[float]] = defaultdict(list)
        self.performance_history: List[Dict[str, float]] = []

        # Communication queues (inbound / outbound)
        self.message_queue = asyncio.Queue()
        self.response_queue = asyncio.Queue()

        # Resource management: usage accumulates during execution,
        # limits are seeded by _init_components below.
        self.resource_usage: Dict[str, float] = {}
        self.resource_limits: Dict[str, float] = {}

        # Async support
        self.executor = ThreadPoolExecutor(max_workers=2)
        self.lock = asyncio.Lock()

        # Logging
        self.logger = logging.getLogger(f"Agent-{profile.id}")

        # Initialize components
        self._init_components()

    def _init_components(self):
        """Seed the knowledge base and the default resource limits."""
        # Expertise starts at a neutral 0.5 per declared area and is then
        # adjusted by an exponential moving average in _update_knowledge_base.
        self.knowledge_base = {
            "expertise": {area: 0.5 for area in self.profile.expertise_areas},
            "learned_skills": set(),
            "interaction_patterns": defaultdict(int),
            "success_patterns": defaultdict(float)
        }

        # Default per-resource caps. Units are implicit — presumably cores,
        # MB and call counts; confirm against the orchestrator's accounting.
        self.resource_limits = {
            "cpu": 1.0,
            "memory": 1000,
            "api_calls": 100,
            "learning_capacity": 0.8
        }
137 |
+
async def process_task(self, task: Task) -> Dict[str, Any]:
    """Run the full lifecycle for one assigned task.

    Pipeline: analyze -> plan -> execute -> learn -> record metrics.
    Returns a summary dict carrying "success", "task_id" and either the
    execution "result" plus rolling "metrics", or an "error" message.
    The agent always returns to IDLE with no current task afterwards.
    """
    self.current_task = task
    self.state = AgentState.BUSY
    try:
        analysis = await self._analyze_task(task)
        plan = await self._plan_execution(analysis)
        outcome = await self._execute_plan(plan)

        # Feed the experience back into meta-learning and bookkeeping.
        await self._learn_from_execution(task, outcome)
        self._update_metrics(task, outcome)

        return {
            "success": True,
            "task_id": task.id,
            "result": outcome,
            "metrics": self._get_execution_metrics()
        }
    except Exception as exc:
        self.logger.error(f"Error processing task: {exc}")
        self.state = AgentState.ERROR
        return {"success": False, "task_id": task.id, "error": str(exc)}
    finally:
        # Reset regardless of success so the agent can take new work.
        self.state = AgentState.IDLE
        self.current_task = None
177 |
+
async def _analyze_task(self, task: Task) -> Dict[str, Any]:
    """Analyze a task's requirements and constraints via the reasoning engine.

    Missing fields in the engine's answer fall back to neutral defaults
    (e.g. complexity 0.5, one-hour duration estimate).
    """
    reasoning_context = {
        "agent_profile": self.profile.__dict__,
        "task_history": self.task_history,
        "knowledge_base": self.knowledge_base
    }
    analysis = await self.reasoning_engine.reason(
        query=task.description,
        context=reasoning_context,
        mode=ReasoningMode.ANALYTICAL
    )

    defaults = {
        "requirements": [],
        "constraints": [],
        "complexity": 0.5,
        "estimated_duration": 3600,
        "required_capabilities": []
    }
    return {key: analysis.get(key, fallback) for key, fallback in defaults.items()}
|
198 |
+
async def _plan_execution(self, analysis: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Derive an ordered list of execution steps from the task analysis.

    Returns an empty list when the engine's answer carries no "steps".
    """
    planning_context = {
        "analysis": analysis,
        "agent_capabilities": self.profile.capabilities,
        "resource_limits": self.resource_limits
    }
    plan = await self.reasoning_engine.reason(
        query="Plan execution steps",
        context=planning_context,
        mode=ReasoningMode.FOCUSED
    )
    return plan.get("steps", [])
213 |
+
async def _execute_plan(self, plan: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Execute every step of the plan, collecting per-step outcomes.

    A failing step is recorded as ``{"error": ...}`` and does not abort
    the remaining steps; overall "success" requires every step to have
    reported success.
    """
    outcomes: List[Dict[str, Any]] = []

    for step in plan:
        try:
            # Refuse the step outright if it would exceed a resource cap.
            if not self._check_resources(step):
                raise RuntimeError("Insufficient resources for step execution")

            outcome = await self._execute_step(step)
            outcomes.append(outcome)

            self._update_resource_usage(step)
            await self._learn_from_step(step, outcome)
        except Exception as exc:
            self.logger.error(f"Error executing step: {exc}")
            outcomes.append({"error": str(exc)})

    return {
        "success": all(o.get("success", False) for o in outcomes),
        "results": outcomes
    }
242 |
+
async def _execute_step(self, step: Dict[str, Any]) -> Dict[str, Any]:
    """Dispatch a single plan step to the handler for its "type".

    Raises:
        ValueError: for a step type with no registered handler.
    """
    handlers = {
        "reasoning": self._execute_reasoning_step,
        "learning": self._execute_learning_step,
        "action": self._execute_action_step,
    }
    step_type = step.get("type", "unknown")
    handler = handlers.get(step_type)
    if handler is None:
        raise ValueError(f"Unknown step type: {step_type}")
    return await handler(step)
255 |
+
async def _execute_reasoning_step(self, step: Dict[str, Any]) -> Dict[str, Any]:
    """Run one reasoning step through the reasoning engine.

    The step must carry a "query"; "context" is optional.
    """
    outcome = await self.reasoning_engine.reason(
        query=step["query"],
        context=step.get("context", {}),
        mode=ReasoningMode.ANALYTICAL
    )
    return {
        "success": outcome.get("success", False),
        "reasoning_result": outcome
    }
268 |
+
async def _execute_learning_step(self, step: Dict[str, Any]) -> Dict[str, Any]:
    """Run one learning step through the meta-learning strategy.

    The step must carry "data"; "context" is optional.
    """
    outcome = await self.meta_learning.learn(
        data=step["data"],
        context=step.get("context", {})
    )
    return {
        "success": outcome.get("success", False),
        "learning_result": outcome
    }
280 |
+
async def _execute_action_step(self, step: Dict[str, Any]) -> Dict[str, Any]:
    """Dispatch an action step by its "action_type".

    Handlers are resolved lazily via getattr so a missing handler method
    surfaces as AttributeError only for the matching action type, exactly
    as with a plain if/elif chain.

    Raises:
        ValueError: for an unrecognized action type.
    """
    handler_names = {
        "api_call": "_make_api_call",
        "data_processing": "_process_data",
        "coordination": "_coordinate_action",
    }
    action_type = step.get("action_type")
    handler_name = handler_names.get(action_type)
    if handler_name is None:
        raise ValueError(f"Unknown action type: {action_type}")
    return await getattr(self, handler_name)(step)
293 |
+
def _check_resources(self, step: Dict[str, Any]) -> bool:
    """Return True when every resource the step requires fits under its cap.

    Resources with no configured limit are treated as unbounded.
    """
    needed = step.get("required_resources", {})
    return all(
        self.resource_usage.get(name, 0) + amount
        <= self.resource_limits.get(name, float('inf'))
        for name, amount in needed.items()
    )
303 |
+
def _update_resource_usage(self, step: Dict[str, Any]):
    """Accumulate the resources a completed step reports having consumed."""
    for name, amount in step.get("used_resources", {}).items():
        self.resource_usage[name] = self.resource_usage.get(name, 0) + amount
310 |
+
async def _learn_from_execution(self, task: Task, result: Dict[str, Any]):
    """Feed a finished task through meta-learning and record the adaptation.

    The learned patterns are folded into the knowledge base and a
    timestamped entry is appended to ``adaptation_history``.
    """
    experience = {
        "task": task.__dict__,
        "result": result,
        "context": {
            "agent_state": self.state,
            "resource_usage": self.resource_usage,
            "performance_metrics": self._get_execution_metrics()
        }
    }

    patterns = await self.meta_learning.learn(
        data=experience,
        context=self.knowledge_base
    )

    self._update_knowledge_base(patterns)

    self.adaptation_history.append({
        "timestamp": datetime.now(),
        "patterns": patterns,
        "metrics": self._get_execution_metrics()
    })
339 |
+
async def _learn_from_step(self, step: Dict[str, Any], result: Dict[str, Any]):
    """Record a successful step and feed it to meta-learning.

    Failed steps are ignored: only successes bump the ``success_patterns``
    counters in the knowledge base.
    """
    if not result.get("success", False):
        return

    # Use .get with a default for consistency with _execute_step, which
    # tolerates steps lacking an explicit "type" key; the original
    # step["type"] raised KeyError for such steps.
    pattern_key = f"{step.get('type', 'unknown')}:{step.get('action_type', 'none')}"
    self.knowledge_base["success_patterns"][pattern_key] += 1

    await self.meta_learning.learn(
        data={
            "step": step,
            "result": result
        },
        context={"pattern_key": pattern_key}
    )
355 |
+
def _update_knowledge_base(self, patterns: Dict[str, Any]):
    """Fold newly learned patterns into the agent's knowledge base."""
    # Expertise: exponential moving average — 90% old estimate, 10% new
    # evidence; only areas already tracked are updated.
    expertise = self.knowledge_base["expertise"]
    for area, signal in patterns.get("expertise_patterns", {}).items():
        if area in expertise:
            expertise[area] = expertise[area] * 0.9 + signal * 0.1

    # Skills: plain set union with anything newly learned.
    self.knowledge_base["learned_skills"].update(
        patterns.get("learned_skills", set())
    )

    # Interaction patterns: additive counters.
    for key, count in patterns.get("interaction_patterns", {}).items():
        self.knowledge_base["interaction_patterns"][key] += count
371 |
+
def _update_metrics(self, task: Task, result: Dict[str, Any]):
    """Append this task's outcome to the rolling metric series.

    Duration is wall-clock time from the task's creation, not from when
    this agent started it.
    """
    snapshot = {
        "success": float(result.get("success", False)),
        "duration": (datetime.now() - task.created_at).total_seconds(),
        "resource_efficiency": self._calculate_resource_efficiency(),
        "learning_progress": self._calculate_learning_progress()
    }

    for name, value in snapshot.items():
        self.metrics[name].append(value)

    self.performance_history.append({
        "timestamp": datetime.now(),
        "metrics": snapshot
    })
388 |
+
def _calculate_resource_efficiency(self) -> float:
    """Return mean resource headroom across tracked resources, in [0, 1].

    1.0 means nothing consumed (or nothing tracked); values shrink toward
    0.0 as usage approaches each limit. Per-resource contributions are
    clamped at 0 so over-budget usage no longer drives the average
    negative, as the original ``1 - usage/limit`` could.
    """
    if not self.resource_limits:
        return 1.0

    efficiencies = []
    for resource, usage in self.resource_usage.items():
        limit = self.resource_limits.get(resource, float('inf'))
        if limit > 0:
            efficiencies.append(max(0.0, 1 - (usage / limit)))

    return sum(efficiencies) / len(efficiencies) if efficiencies else 1.0
401 |
+
def _calculate_learning_progress(self) -> float:
    """Mean expertise level across all tracked areas (0.0 when none)."""
    expertise = self.knowledge_base["expertise"]
    if not expertise:
        return 0.0
    return sum(expertise.values()) / len(expertise)
408 |
+
def _get_execution_metrics(self) -> Dict[str, float]:
    """Average the most recent ten samples of each non-empty metric series."""
    averages: Dict[str, float] = {}
    for name, series in self.metrics.items():
        if series:
            window = series[-10:]
            averages[name] = sum(window) / len(window)
    return averages
|
416 |
+
class AgenticSystem:
    """Advanced multi-agent system with orchestration."""

    def __init__(self, config: Dict[str, Any] = None):
        """Wire up the orchestrator, shared engines and system state.

        Args:
            config: Optional system settings; individual keys have
                sensible defaults.
        """
        self.config = config or {}

        # Orchestrator owns task routing and agent registration.
        self.orchestrator = AgentOrchestrator(config)

        # Shared engines handed to every agent this system creates.
        self.agents: Dict[str, Agent] = {}
        default_weights = {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        }
        self.reasoning_engine = ReasoningEngine(
            min_confidence=self.config.get('min_confidence', 0.7),
            parallel_threshold=self.config.get('parallel_threshold', 3),
            learning_rate=self.config.get('learning_rate', 0.1),
            strategy_weights=self.config.get('strategy_weights', default_weights)
        )
        self.meta_learning = MetaLearningStrategy(config)

        # System state
        self.state = "initialized"
        self.metrics: Dict[str, List[float]] = defaultdict(list)

        # Async plumbing
        self.executor = ThreadPoolExecutor(max_workers=4)
        self.lock = asyncio.Lock()

        # Logging
        self.logger = logging.getLogger("AgenticSystem")
|
451 |
+
async def create_agent(
    self,
    name: str,
    role: AgentRole,
    capabilities: List[AgentCapability],
    personality: AgentPersonality,
    expertise_areas: List[str]
) -> str:
    """Create, register and store a new agent.

    Returns:
        The id assigned by the orchestrator (also the key in
        ``self.agents``).
    """
    # Build the static profile; learning rate and risk tolerance use
    # fixed defaults here.
    profile = AgentProfile(
        id=str(uuid.uuid4()),
        name=name,
        role=role,
        capabilities=capabilities,
        personality=personality,
        expertise_areas=expertise_areas,
        learning_rate=0.1,
        risk_tolerance=0.5,
        created_at=datetime.now(),
        metadata={}
    )

    # The agent shares the system-wide reasoning and learning engines.
    agent = Agent(
        profile=profile,
        reasoning_engine=self.reasoning_engine,
        meta_learning=self.meta_learning,
        config=self.config.get("agent_config", {})
    )

    agent_id = await self.orchestrator.register_agent(
        role=role,
        capabilities=[cap.value for cap in capabilities]
    )

    # Registry mutation is guarded against concurrent creates.
    async with self.lock:
        self.agents[agent_id] = agent

    return agent_id
494 |
+
async def submit_task(
    self,
    description: str,
    priority: TaskPriority = TaskPriority.MEDIUM,
    deadline: Optional[datetime] = None
) -> str:
    """Forward a new task to the orchestrator and return its task id."""
    return await self.orchestrator.submit_task(
        description=description,
        priority=priority,
        deadline=deadline
    )
|
507 |
+
async def get_task_status(self, task_id: str) -> Dict[str, Any]:
    """Look up one task's status via the orchestrator."""
    return await self.orchestrator.get_task_status(task_id)

async def get_agent_status(self, agent_id: str) -> Dict[str, Any]:
    """Snapshot one agent's profile, state, current task, metrics and usage.

    Raises:
        ValueError: if no agent with ``agent_id`` is registered.
    """
    agent = self.agents.get(agent_id)
    if not agent:
        raise ValueError(f"Unknown agent: {agent_id}")

    current = agent.current_task.__dict__ if agent.current_task else None
    return {
        "profile": agent.profile.__dict__,
        "state": agent.state,
        "current_task": current,
        "metrics": agent._get_execution_metrics(),
        "resource_usage": agent.resource_usage
    }

async def get_system_status(self) -> Dict[str, Any]:
    """Aggregate system-wide state, activity and resource figures."""
    busy_agents = [a for a in self.agents.values() if a.state == AgentState.BUSY]
    return {
        "state": self.state,
        "agent_count": len(self.agents),
        "active_tasks": len(busy_agents),
        "performance_metrics": self._calculate_system_metrics(),
        "resource_usage": self._calculate_resource_usage()
    }
|
535 |
+
def _calculate_system_metrics(self) -> Dict[str, float]:
    """Average each execution metric across every registered agent."""
    pooled: Dict[str, List[float]] = defaultdict(list)

    for agent in self.agents.values():
        for name, value in agent._get_execution_metrics().items():
            pooled[name].append(value)

    return {
        name: sum(samples) / len(samples)
        for name, samples in pooled.items()
        if samples
    }
|
550 |
+
def _calculate_resource_usage(self) -> Dict[str, float]:
|
551 |
+
"""Calculate overall resource usage."""
|
552 |
+
usage = defaultdict(float)
|
553 |
+
|
554 |
+
for agent in self.agents.values():
|
555 |
+
for resource, amount in agent.resource_usage.items():
|
556 |
+
usage[resource] += amount
|
557 |
+
|
558 |
+
return dict(usage)
|
api/__init__.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""API package for the agentic system."""
|
2 |
+
|
3 |
+
from .openai_compatible import OpenAICompatibleAPI
|
4 |
+
from .venture_api import VentureAPI
|
5 |
+
|
6 |
+
__all__ = ['OpenAICompatibleAPI', 'VentureAPI']
|
api/openai_compatible.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""OpenAI-compatible API endpoints."""
|
2 |
+
|
3 |
+
from typing import Dict, List, Optional, Union
|
4 |
+
from pydantic import BaseModel, Field
|
5 |
+
from fastapi import APIRouter, HTTPException, Depends
|
6 |
+
import time
|
7 |
+
import json
|
8 |
+
import asyncio
|
9 |
+
from datetime import datetime
|
10 |
+
|
11 |
+
class ChatMessage(BaseModel):
    """OpenAI-compatible chat message.

    Mirrors one element of the ``messages`` array in the OpenAI chat
    completions request schema.
    """
    role: str = Field(..., description="The role of the message author (system/user/assistant)")
    content: str = Field(..., description="The content of the message")
    name: Optional[str] = Field(None, description="The name of the author")
|
16 |
+
|
17 |
+
class ChatCompletionRequest(BaseModel):
    """OpenAI-compatible chat completion request.

    Field names and defaults follow the OpenAI ``/v1/chat/completions``
    request schema so off-the-shelf OpenAI clients can call this API.
    """
    model: str = Field(..., description="Model to use")
    messages: List[ChatMessage]
    temperature: Optional[float] = Field(0.7, description="Sampling temperature")
    top_p: Optional[float] = Field(1.0, description="Nucleus sampling parameter")
    n: Optional[int] = Field(1, description="Number of completions")
    stream: Optional[bool] = Field(False, description="Whether to stream responses")
    stop: Optional[Union[str, List[str]]] = Field(None, description="Stop sequences")
    max_tokens: Optional[int] = Field(None, description="Maximum tokens to generate")
    presence_penalty: Optional[float] = Field(0.0, description="Presence penalty")
    frequency_penalty: Optional[float] = Field(0.0, description="Frequency penalty")
    user: Optional[str] = Field(None, description="User identifier")
|
30 |
+
|
31 |
+
class ChatCompletionResponse(BaseModel):
    """OpenAI-compatible chat completion response.

    Shape matches the OpenAI ``chat.completion`` response object; ``choices``
    and ``usage`` are kept as loose dicts rather than nested models.
    """
    id: str = Field(..., description="Unique identifier for the completion")
    object: str = Field("chat.completion", description="Object type")
    created: int = Field(..., description="Unix timestamp of creation")
    model: str = Field(..., description="Model used")
    choices: List[Dict] = Field(..., description="Completion choices")
    usage: Dict[str, int] = Field(..., description="Token usage statistics")
|
39 |
+
|
40 |
+
class OpenAICompatibleAPI:
    """OpenAI-compatible API implementation.

    Wraps the project's reasoning engine behind the standard OpenAI
    ``/v1/chat/completions`` and ``/v1/models`` REST surface so existing
    OpenAI client libraries can talk to it unchanged.
    """

    def __init__(self, reasoning_engine):
        # The engine is expected to expose async reason(query, context)
        # returning an object with an `answer` attribute (see route below).
        self.reasoning_engine = reasoning_engine
        self.router = APIRouter()
        self.setup_routes()

    def setup_routes(self):
        """Register the chat-completions and model-listing routes."""

        @self.router.post("/v1/chat/completions")
        async def create_chat_completion(request: ChatCompletionRequest) -> ChatCompletionResponse:
            try:
                # Convert chat history to context
                context = self._prepare_context(request.messages)

                # Get the last user message
                user_message = next(
                    (msg.content for msg in reversed(request.messages)
                     if msg.role == "user"),
                    None
                )

                if not user_message:
                    raise HTTPException(status_code=400, detail="No user message found")

                # Process with reasoning engine.
                # NOTE: request.stream is forwarded in the context but streaming
                # is not implemented here; the reply is always one completion.
                result = await self.reasoning_engine.reason(
                    query=user_message,
                    context={
                        "chat_history": context,
                        "temperature": request.temperature,
                        "top_p": request.top_p,
                        "max_tokens": request.max_tokens,
                        "stream": request.stream
                    }
                )

                # Estimate token usage once instead of three separate calls.
                prompt_tokens = self._estimate_tokens(user_message)
                completion_tokens = self._estimate_tokens(result.answer)

                # Format response in the OpenAI chat.completion shape.
                response = {
                    "id": f"chatcmpl-{int(time.time()*1000)}",
                    "object": "chat.completion",
                    "created": int(time.time()),
                    "model": request.model,
                    "choices": [{
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": result.answer
                        },
                        "finish_reason": "stop"
                    }],
                    "usage": {
                        "prompt_tokens": prompt_tokens,
                        "completion_tokens": completion_tokens,
                        "total_tokens": prompt_tokens + completion_tokens
                    }
                }

                return ChatCompletionResponse(**response)

            except HTTPException:
                # Preserve intentional HTTP errors (e.g. the 400 above) instead
                # of letting the broad handler re-wrap them as 500s.
                raise
            except Exception as e:
                raise HTTPException(status_code=500, detail=str(e))

        @self.router.get("/v1/models")
        async def list_models():
            """List available models (single static entry)."""
            return {
                "object": "list",
                "data": [
                    {
                        "id": "venture-gpt-1",
                        "object": "model",
                        "created": int(time.time()),
                        "owned_by": "venture-ai",
                        "permission": [],
                        "root": "venture-gpt-1",
                        "parent": None
                    }
                ]
            }

    def _prepare_context(self, messages: "List[ChatMessage]") -> List[Dict]:
        """Convert request messages into the engine's chat-history format.

        Each entry keeps role/content/name and is stamped with the current
        server time (the OpenAI schema carries no client timestamps).
        """
        return [
            {
                "role": msg.role,
                "content": msg.content,
                "name": msg.name,
                "timestamp": datetime.now().isoformat()
            }
            for msg in messages
        ]

    def _estimate_tokens(self, text: str) -> int:
        """Estimate token count for a text (heuristic: ~4 characters/token)."""
        return len(text) // 4
|
api/venture_api.py
ADDED
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""API endpoints for venture strategies and analysis."""
|
2 |
+
|
3 |
+
from fastapi import APIRouter, HTTPException, Depends
|
4 |
+
from typing import List, Dict, Any, Optional
|
5 |
+
from pydantic import BaseModel, Field
|
6 |
+
from datetime import datetime
|
7 |
+
|
8 |
+
from reasoning.venture_strategies import (
|
9 |
+
AIStartupStrategy, SaaSVentureStrategy, AutomationVentureStrategy,
|
10 |
+
DataVentureStrategy, APIVentureStrategy, MarketplaceVentureStrategy,
|
11 |
+
AIInfrastructureStrategy, AIConsultingStrategy, AIProductStrategy,
|
12 |
+
FinTechStrategy, HealthTechStrategy, EdTechStrategy,
|
13 |
+
BlockchainStrategy, AIMarketplaceStrategy
|
14 |
+
)
|
15 |
+
from reasoning.market_analysis import MarketAnalyzer
|
16 |
+
from reasoning.portfolio_optimization import PortfolioOptimizer
|
17 |
+
from reasoning.monetization import MonetizationOptimizer
|
18 |
+
|
19 |
+
router = APIRouter(prefix="/api/ventures", tags=["ventures"])
|
20 |
+
|
21 |
+
# Models
|
22 |
+
class VentureRequest(BaseModel):
    """Venture analysis request.

    Attributes:
        venture_type: Key into VENTURE_STRATEGIES selecting a strategy.
        query: Free-form question or opportunity description to analyze.
        context: Optional extra parameters forwarded to the strategy.
    """
    venture_type: str
    query: str
    context: Dict[str, Any] = Field(default_factory=dict)
|
27 |
+
|
28 |
+
class MarketRequest(BaseModel):
    """Market analysis request.

    Attributes:
        segment: Market segment identifier passed to MarketAnalyzer.
        context: Optional extra parameters for the analysis.
    """
    segment: str
    context: Dict[str, Any] = Field(default_factory=dict)
|
32 |
+
|
33 |
+
class PortfolioRequest(BaseModel):
    """Portfolio optimization request.

    Attributes:
        ventures: Venture identifiers to optimize as a portfolio.
        context: Optional extra parameters for the optimizer.
    """
    ventures: List[str]
    context: Dict[str, Any] = Field(default_factory=dict)
|
37 |
+
|
38 |
+
class MonetizationRequest(BaseModel):
    """Monetization optimization request.

    Attributes:
        venture_type: Venture type whose monetization should be optimized.
        context: Optional extra parameters for the optimizer.
    """
    venture_type: str
    context: Dict[str, Any] = Field(default_factory=dict)
|
42 |
+
|
43 |
+
# Strategy mapping
|
44 |
+
# Singleton strategy instances keyed by the venture_type string accepted by
# the /analyze and /metrics/{venture_type} endpoints. Instantiated once at
# import time and shared across all requests.
VENTURE_STRATEGIES = {
    "ai_startup": AIStartupStrategy(),
    "saas": SaaSVentureStrategy(),
    "automation": AutomationVentureStrategy(),
    "data": DataVentureStrategy(),
    "api": APIVentureStrategy(),
    "marketplace": MarketplaceVentureStrategy(),
    "ai_infrastructure": AIInfrastructureStrategy(),
    "ai_consulting": AIConsultingStrategy(),
    "ai_product": AIProductStrategy(),
    "fintech": FinTechStrategy(),
    "healthtech": HealthTechStrategy(),
    "edtech": EdTechStrategy(),
    "blockchain": BlockchainStrategy(),
    "ai_marketplace": AIMarketplaceStrategy()
}
|
60 |
+
|
61 |
+
# Endpoints
|
62 |
+
@router.post("/analyze")
|
63 |
+
async def analyze_venture(request: VentureRequest):
|
64 |
+
"""Analyze venture opportunity."""
|
65 |
+
try:
|
66 |
+
strategy = VENTURE_STRATEGIES.get(request.venture_type)
|
67 |
+
if not strategy:
|
68 |
+
raise HTTPException(
|
69 |
+
status_code=400,
|
70 |
+
detail=f"Invalid venture type: {request.venture_type}"
|
71 |
+
)
|
72 |
+
|
73 |
+
result = await strategy.reason(request.query, request.context)
|
74 |
+
return {
|
75 |
+
"success": True,
|
76 |
+
"result": result,
|
77 |
+
"timestamp": datetime.now().isoformat()
|
78 |
+
}
|
79 |
+
except Exception as e:
|
80 |
+
raise HTTPException(status_code=500, detail=str(e))
|
81 |
+
|
82 |
+
@router.post("/market")
|
83 |
+
async def analyze_market(request: MarketRequest):
|
84 |
+
"""Analyze market opportunity."""
|
85 |
+
try:
|
86 |
+
analyzer = MarketAnalyzer()
|
87 |
+
result = await analyzer.analyze_market(request.segment, request.context)
|
88 |
+
return {
|
89 |
+
"success": True,
|
90 |
+
"result": result,
|
91 |
+
"timestamp": datetime.now().isoformat()
|
92 |
+
}
|
93 |
+
except Exception as e:
|
94 |
+
raise HTTPException(status_code=500, detail=str(e))
|
95 |
+
|
96 |
+
@router.post("/portfolio")
|
97 |
+
async def optimize_portfolio(request: PortfolioRequest):
|
98 |
+
"""Optimize venture portfolio."""
|
99 |
+
try:
|
100 |
+
optimizer = PortfolioOptimizer()
|
101 |
+
result = await optimizer.optimize_portfolio(request.ventures, request.context)
|
102 |
+
return {
|
103 |
+
"success": True,
|
104 |
+
"result": result,
|
105 |
+
"timestamp": datetime.now().isoformat()
|
106 |
+
}
|
107 |
+
except Exception as e:
|
108 |
+
raise HTTPException(status_code=500, detail=str(e))
|
109 |
+
|
110 |
+
@router.post("/monetization")
|
111 |
+
async def optimize_monetization(request: MonetizationRequest):
|
112 |
+
"""Optimize venture monetization."""
|
113 |
+
try:
|
114 |
+
optimizer = MonetizationOptimizer()
|
115 |
+
result = await optimizer.optimize_monetization(
|
116 |
+
request.venture_type, request.context)
|
117 |
+
return {
|
118 |
+
"success": True,
|
119 |
+
"result": result,
|
120 |
+
"timestamp": datetime.now().isoformat()
|
121 |
+
}
|
122 |
+
except Exception as e:
|
123 |
+
raise HTTPException(status_code=500, detail=str(e))
|
124 |
+
|
125 |
+
@router.get("/strategies")
|
126 |
+
async def list_strategies():
|
127 |
+
"""List available venture strategies."""
|
128 |
+
return {
|
129 |
+
"success": True,
|
130 |
+
"strategies": list(VENTURE_STRATEGIES.keys()),
|
131 |
+
"timestamp": datetime.now().isoformat()
|
132 |
+
}
|
133 |
+
|
134 |
+
@router.get("/metrics/{venture_type}")
|
135 |
+
async def get_venture_metrics(venture_type: str):
|
136 |
+
"""Get venture performance metrics."""
|
137 |
+
try:
|
138 |
+
strategy = VENTURE_STRATEGIES.get(venture_type)
|
139 |
+
if not strategy:
|
140 |
+
raise HTTPException(
|
141 |
+
status_code=400,
|
142 |
+
detail=f"Invalid venture type: {venture_type}"
|
143 |
+
)
|
144 |
+
|
145 |
+
metrics = strategy.get_venture_metrics()
|
146 |
+
return {
|
147 |
+
"success": True,
|
148 |
+
"metrics": metrics,
|
149 |
+
"timestamp": datetime.now().isoformat()
|
150 |
+
}
|
151 |
+
except Exception as e:
|
152 |
+
raise HTTPException(status_code=500, detail=str(e))
|
153 |
+
|
154 |
+
@router.get("/insights")
|
155 |
+
async def get_market_insights():
|
156 |
+
"""Get comprehensive market insights."""
|
157 |
+
try:
|
158 |
+
analyzer = MarketAnalyzer()
|
159 |
+
insights = analyzer.get_market_insights()
|
160 |
+
return {
|
161 |
+
"success": True,
|
162 |
+
"insights": insights,
|
163 |
+
"timestamp": datetime.now().isoformat()
|
164 |
+
}
|
165 |
+
except Exception as e:
|
166 |
+
raise HTTPException(status_code=500, detail=str(e))
|
167 |
+
|
168 |
+
@router.get("/portfolio/insights")
|
169 |
+
async def get_portfolio_insights():
|
170 |
+
"""Get comprehensive portfolio insights."""
|
171 |
+
try:
|
172 |
+
optimizer = PortfolioOptimizer()
|
173 |
+
insights = optimizer.get_portfolio_insights()
|
174 |
+
return {
|
175 |
+
"success": True,
|
176 |
+
"insights": insights,
|
177 |
+
"timestamp": datetime.now().isoformat()
|
178 |
+
}
|
179 |
+
except Exception as e:
|
180 |
+
raise HTTPException(status_code=500, detail=str(e))
|
181 |
+
|
182 |
+
@router.get("/monetization/metrics")
|
183 |
+
async def get_monetization_metrics():
|
184 |
+
"""Get comprehensive monetization metrics."""
|
185 |
+
try:
|
186 |
+
optimizer = MonetizationOptimizer()
|
187 |
+
metrics = optimizer.get_monetization_metrics()
|
188 |
+
return {
|
189 |
+
"success": True,
|
190 |
+
"metrics": metrics,
|
191 |
+
"timestamp": datetime.now().isoformat()
|
192 |
+
}
|
193 |
+
except Exception as e:
|
194 |
+
raise HTTPException(status_code=500, detail=str(e))
|
app.py
ADDED
@@ -0,0 +1,633 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Advanced Agentic System Interface
|
3 |
+
-------------------------------
|
4 |
+
Provides a chat interface to interact with the autonomous agent teams:
|
5 |
+
- Team A: Coders (App/Software Developers)
|
6 |
+
- Team B: Business (Entrepreneurs)
|
7 |
+
- Team C: Research (Deep Online Research)
|
8 |
+
- Team D: Crypto & Sports Trading
|
9 |
+
"""
|
10 |
+
|
11 |
+
import asyncio
import json
import logging
import os
import socket
import time
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, List, Tuple, Optional

import gradio as gr
import requests
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from requests.adapters import HTTPAdapter, Retry
# NOTE: the urllib3 Retry below intentionally shadows the one re-exported by
# requests.adapters above; both existing imports are kept.
from urllib3.util.retry import Retry

from agentic_system import AgenticSystem
from team_management import TeamManager, TeamType, TeamObjective
from orchestrator import AgentOrchestrator
from reasoning import UnifiedReasoningEngine as ReasoningEngine
|
29 |
+
|
30 |
+
# Configure logging
|
31 |
+
logging.basicConfig(level=logging.INFO)
|
32 |
+
logger = logging.getLogger(__name__)
|
33 |
+
|
34 |
+
# Configure network settings
|
35 |
+
# Request timeout in seconds; overridable via the REQUESTS_TIMEOUT env var.
TIMEOUT = int(os.getenv('REQUESTS_TIMEOUT', '30'))
# Maximum retry attempts for outbound HTTP requests.
MAX_RETRIES = 5
# Base backoff factor (seconds) between retries.
RETRY_BACKOFF = 1
|
38 |
+
|
39 |
+
def setup_requests_session():
    """Build a requests.Session that retries transient HTTP failures.

    Retries idempotent methods on common transient status codes, with
    backoff controlled by the module-level MAX_RETRIES / RETRY_BACKOFF.
    """
    retries = Retry(
        total=MAX_RETRIES,
        backoff_factor=RETRY_BACKOFF,
        status_forcelist=[408, 429, 500, 502, 503, 504],
        allowed_methods=["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
    )
    http_adapter = HTTPAdapter(max_retries=retries)
    session = requests.Session()
    for scheme in ("https://", "http://"):
        session.mount(scheme, http_adapter)
    return session
|
52 |
+
|
53 |
+
def check_network(max_attempts=3):
    """Check connectivity to Hugging Face with bounded retries.

    Each attempt resolves huggingface.co and probes the health endpoint.
    Returns True as soon as the endpoint answers 200, False when every
    attempt fails.
    """
    session = setup_requests_session()

    for attempt in range(max_attempts):
        try:
            # The original code looped over hard-coded DNS server IPs without
            # ever using them (gethostbyname cannot be pointed at a specific
            # server); a single resolution of the target host is equivalent.
            try:
                socket.gethostbyname('huggingface.co')
            except socket.gaierror:
                # Resolution failure is not fatal on its own: the HTTP request
                # below surfaces the real error and drives retry/backoff.
                pass

            # Test connection to Hugging Face
            response = session.get('https://huggingface.co/api/health',
                                timeout=TIMEOUT)
            if response.status_code == 200:
                return True

        except (requests.RequestException, socket.gaierror) as e:
            logger.warning(f"Network check attempt {attempt + 1} failed: {e}")
            if attempt < max_attempts - 1:
                time.sleep(RETRY_BACKOFF * (attempt + 1))
                continue

    logger.error("Network connectivity check failed after all attempts")
    return False
|
81 |
+
|
82 |
+
class ChatInterface:
    """Gradio-facing chat front-end over the agentic system.

    Owns the orchestrator, agentic system and team manager, keeps the running
    chat history, and routes each user message to a query / objective /
    status / general-chat handler based on the analyzed intent.
    """

    def __init__(self):
        # Check network connectivity
        if not check_network():
            logger.warning("Network connectivity issues detected - continuing with degraded functionality")

        # Initialize core components with consistent configuration
        config = {
            "min_confidence": 0.7,
            "parallel_threshold": 3,
            "learning_rate": 0.1,
            "strategy_weights": {
                "LOCAL_LLM": 0.8,
                "CHAIN_OF_THOUGHT": 0.6,
                "TREE_OF_THOUGHTS": 0.5,
                "META_LEARNING": 0.4
            }
        }

        self.orchestrator = AgentOrchestrator(config)
        self.agentic_system = AgenticSystem(config)
        self.team_manager = TeamManager(self.orchestrator)
        self.chat_history = []
        self.active_objectives = {}

        # Set up network session
        self.session = setup_requests_session()

        # Initialize teams
        # NOTE(review): asyncio.run raises if an event loop is already running
        # (e.g. when constructed inside an async server) — confirm init context.
        asyncio.run(self.team_manager.initialize_team_agents())

    async def process_message(
        self,
        message: str,
        history: List[List[str]]
    ) -> Tuple[str, List[List[str]]]:
        """Process incoming chat message."""
        try:
            # Update chat history
            self.chat_history = history

            # Process message
            response = await self._handle_message(message)

            # Update history
            if response:
                history.append([message, response])

            return response, history

        except Exception as e:
            logger.error(f"Error processing message: {str(e)}")
            error_msg = "I apologize, but I encountered an error. Please try again."
            history.append([message, error_msg])
            return error_msg, history

    async def _handle_message(self, message: str) -> str:
        """Handle message processing with error recovery."""
        try:
            # Analyze intent
            intent = await self._analyze_intent(message)
            intent_type = self._get_intent_type(intent)

            # Route to appropriate handler
            if intent_type == "query":
                return await self._handle_query(message)
            elif intent_type == "objective":
                return await self._handle_objective(message)
            elif intent_type == "status":
                return await self._handle_status_request(message)
            else:
                return await self._handle_general_chat(message)

        except Exception as e:
            logger.error(f"Error in message handling: {str(e)}")
            return "I apologize, but I encountered an error processing your message. Please try again."

    def _get_intent_type(self, intent) -> str:
        """Safely extract intent type from various result formats."""
        # Anything that is not a dict falls back to the general-chat path.
        if isinstance(intent, dict):
            return intent.get("type", "general")
        return "general"

    async def _analyze_intent(self, message: str) -> Dict[str, Any]:
        """Analyze user message intent with error handling."""
        try:
            # Use reasoning engine to analyze intent
            # NOTE(review): assumes the engine returns a dict-like result with
            # .get(); other call sites in this repo use an object with an
            # `answer` attribute — verify the engine's return type.
            analysis = await self.orchestrator.reasoning_engine.reason(
                query=message,
                context={
                    "chat_history": self.chat_history,
                    "active_objectives": self.active_objectives
                }
            )

            return {
                "type": analysis.get("intent_type", "general"),
                "confidence": analysis.get("confidence", 0.5),
                "entities": analysis.get("entities", []),
                "action_required": analysis.get("action_required", False)
            }
        except Exception as e:
            logger.error(f"Error analyzing intent: {str(e)}")
            # Conservative fallback: treat as general chat.
            return {"type": "general", "confidence": 0.5}

    async def _handle_query(self, message: str) -> str:
        """Handle information queries."""
        try:
            # Get relevant teams for the query
            recommended_teams = await self.team_manager.get_team_recommendations(message)

            # Get responses from relevant teams
            responses = []
            for team_type in recommended_teams:
                response = await self._get_team_response(team_type, message)
                if response:
                    responses.append(response)

            if not responses:
                return "I apologize, but I couldn't find a relevant answer to your query."

            # Combine and format responses
            return self._format_team_responses(responses)

        except Exception as e:
            logger.error(f"Error handling query: {str(e)}")
            return "I apologize, but I encountered an error processing your query. Please try again."

    async def _handle_objective(self, message: str) -> str:
        """Handle new objective creation."""
        try:
            # Create new objective
            objective_id = await self.team_manager.create_objective(message)
            if not objective_id:
                return "I apologize, but I couldn't create the objective. Please try again."

            # Format and return response
            return self._format_objective_creation(objective_id)

        except Exception as e:
            logger.error(f"Error creating objective: {str(e)}")
            return "I apologize, but I encountered an error creating the objective. Please try again."

    async def _handle_status_request(self, message: str) -> str:
        """Handle status check requests."""
        try:
            # Get system status
            system_status = await self.agentic_system.get_system_status()

            # Get team status
            team_status = {}
            for team_id, team in self.team_manager.teams.items():
                team_status[team.name] = await self.team_manager.monitor_objective_progress(team_id)

            # Get objective status
            objective_status = {}
            for obj_id, obj in self.active_objectives.items():
                objective_status[obj_id] = await self.team_manager.monitor_objective_progress(obj_id)

            return self._format_status_response(system_status, team_status, objective_status)

        except Exception as e:
            logger.error(f"Error getting status: {str(e)}")
            return "I apologize, but I encountered an error getting the status. Please try again."

    async def _handle_general_chat(self, message: str) -> str:
        """Handle general chat interactions with error recovery."""
        try:
            # Use reasoning engine for response generation
            response = await self.orchestrator.reasoning_engine.reason(
                query=message,
                context={
                    "chat_history": self.chat_history,
                    "system_state": await self.agentic_system.get_system_status()
                }
            )

            if not response or not response.get("response"):
                return "I apologize, but I couldn't generate a meaningful response. Please try again."

            return response["response"]

        except Exception as e:
            logger.error(f"Error in general chat: {str(e)}")
            return "I apologize, but I encountered an error processing your message. Please try again."

    async def _get_team_response(self, team_type: TeamType, query: str) -> Dict[str, Any]:
        """Get response from a specific team.

        Returns None when the team is unknown, no agent answers, or an
        error occurs — callers treat None as "no contribution".
        """
        try:
            team = self.team_manager.teams.get(team_type.value)
            if not team:
                return None

            # Get response from team's agents
            responses = []
            for agent in team.agents:
                response = await agent.process_query(query)
                if response:
                    responses.append(response)

            if not responses:
                return None

            # Return best response
            return self._combine_agent_responses(responses)

        except Exception as e:
            logger.error(f"Error getting team response: {str(e)}")
            return None

    def _combine_agent_responses(self, responses: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Combine multiple agent responses into a coherent response.

        Currently picks the single highest-confidence successful response.
        """
        try:
            # Sort by confidence
            valid_responses = [
                r for r in responses
                if r.get("success", False) and r.get("response")
            ]

            if not valid_responses:
                return None

            sorted_responses = sorted(
                valid_responses,
                key=lambda x: x.get("confidence", 0),
                reverse=True
            )

            # Take the highest confidence response
            return sorted_responses[0]

        except Exception as e:
            logger.error(f"Error combining responses: {str(e)}")
            return None

    def _format_team_responses(self, responses: List[Dict[str, Any]]) -> str:
        """Format team responses into a readable message."""
        try:
            if not responses:
                return "No team responses available."

            formatted = []
            for resp in responses:
                if resp and resp.get("response"):
                    team_name = resp.get("team_name", "Unknown Team")
                    confidence = resp.get("confidence", 0)
                    formatted.append(
                        f"\n{team_name} (Confidence: {confidence:.2%}):\n{resp['response']}"
                    )

            if not formatted:
                return "No valid team responses available."

            return "\n".join(formatted)

        except Exception as e:
            logger.error(f"Error formatting responses: {str(e)}")
            return "Error formatting team responses."

    def _format_objective_creation(self, objective_id: str) -> str:
        """Format objective creation response."""
        try:
            # NOTE(review): active_objectives is only read here; confirm the
            # team manager (or another component) populates it on creation.
            obj = self.active_objectives.get(objective_id)
            if not obj:
                return "Objective created but details not available."

            return "\n".join([
                "New Objective Created:",
                f"Description: {obj['description']}",
                f"Status: {obj['status']}",
                f"Assigned Teams: {', '.join(t.value for t in obj['teams'])}"
            ])

        except Exception as e:
            logger.error(f"Error formatting objective: {str(e)}")
            return "Error formatting objective details."

    def _format_status_response(
        self,
        system_status: Dict[str, Any],
        team_status: Dict[str, Any],
        objective_status: Dict[str, Any]
    ) -> str:
        """Format status response."""
        try:
            # Format system status
            status = [
                "System Status:",
                f"- State: {system_status['state']}",
                f"- Active Agents: {system_status['agent_count']}",
                f"- Active Tasks: {system_status['active_tasks']}",
                "\nTeam Status:"
            ]

            # Add team status
            for team_name, team_info in team_status.items():
                status.extend([
                    f"\n{team_name}:",
                    f"- Active Agents: {team_info['active_agents']}",
                    f"- Completion Rate: {team_info['completion_rate']:.2%}",
                    f"- Collaboration Score: {team_info['collaboration_score']:.2f}"
                ])

            # Add objective status
            if objective_status:
                status.append("\nActive Objectives:")
                for obj_id, obj_info in objective_status.items():
                    obj = self.active_objectives[obj_id]
                    status.extend([
                        f"\n{obj['description']}:",
                        f"- Status: {obj['status']}",
                        f"- Teams: {', '.join(t.value for t in obj['teams'])}",
                        f"- Progress: {sum(t['completion_rate'] for t in obj_info.values())/len(obj_info):.2%}"
                    ])

            return "\n".join(status)

        except Exception as e:
            logger.error(f"Error formatting status: {str(e)}")
            return "Error formatting status information."
|
402 |
+
|
403 |
+
class VentureUI:
    """Gradio chat front-end wrapper.

    ``app`` is an awaitable callable ``(message, history_list) -> (response,
    history_list)`` — in this file it is ``ChatInterface.process_message``
    (see ``create_chat_interface``).
    """

    def __init__(self, app):
        # The async message handler the UI delegates to.
        self.app = app

    def create_interface(self):
        """Create the Gradio interface."""
        with gr.Blocks(
            theme=gr.themes.Soft(),
            analytics_enabled=False,
            title="Advanced Agentic System"
        ) as interface:
            # Header also surfaces the running Gradio version for debugging.
            gr.Markdown(f"""
            # Advanced Agentic System Chat Interface v{gr.__version__}

            Chat with our autonomous agent teams:
            - Team A: Coders (App/Software Developers)
            - Team B: Business (Entrepreneurs)
            - Team C: Research (Deep Online Research)
            - Team D: Crypto & Sports Trading

            You can:
            1. Ask questions
            2. Create new objectives
            3. Check status of teams and objectives
            4. Get insights and recommendations
            """)

            chatbot = gr.Chatbot(
                label="Chat History",
                height=400,
                bubble_full_width=False,
                show_copy_button=True,
                render_markdown=True
            )

            with gr.Row():
                msg = gr.Textbox(
                    label="Message",
                    placeholder="Chat with the Agentic System...",
                    lines=2,
                    scale=9,
                    autofocus=True,
                    container=True
                )
                submit = gr.Button(
                    "Send",
                    scale=1,
                    variant="primary"
                )

            with gr.Row():
                clear = gr.ClearButton(
                    [msg, chatbot],
                    value="Clear Chat",
                    variant="secondary",
                    scale=1
                )
                retry = gr.Button(
                    "Retry Last",
                    variant="secondary",
                    scale=1
                )

            async def respond(message, history):
                """Handle one turn: call the backend, append to history.

                Returns ("", history) so the textbox is cleared on success
                and on error alike.
                """
                try:
                    # Convert history to the list-of-pairs shape
                    # process_message expects.
                    history_list = [[x, y] for x, y in history] if history else []
                    response, history_list = await self.app(message, history_list)

                    # Update the displayed history; note the returned
                    # history_list from the backend is not used here.
                    if history is None:
                        history = []
                    history.append((message, response))

                    return "", history
                except Exception as e:
                    logger.error(f"Error in chat response: {str(e)}")
                    error_msg = "I apologize, but I encountered an error. Please try again."

                    if history is None:
                        history = []
                    history.append((message, error_msg))

                    return "", history

            async def retry_last(history):
                """Re-submit the last user message after dropping the last exchange."""
                if not history:
                    return history
                last_user_msg = history[-1][0]
                history = history[:-1]  # Remove last exchange
                # NOTE(review): respond returns a 2-tuple ("", history) but
                # retry.click below declares a single output component —
                # confirm Gradio unpacks this as intended.
                return await respond(last_user_msg, history)

            msg.submit(
                respond,
                [msg, chatbot],
                [msg, chatbot],
                api_name="chat"
            ).then(
                # Re-enable inputs once the turn completes.
                lambda: gr.update(interactive=True),
                None,
                [msg, submit],
                queue=False
            )

            submit.click(
                respond,
                [msg, chatbot],
                [msg, chatbot],
                api_name="submit"
            ).then(
                lambda: gr.update(interactive=True),
                None,
                [msg, submit],
                queue=False
            )

            retry.click(
                retry_last,
                [chatbot],
                [chatbot],
                api_name="retry"
            )

            # Disable Send while the textbox is empty/whitespace-only.
            msg.change(lambda x: gr.update(interactive=bool(x.strip())), [msg], [submit])

            # Add example inputs
            gr.Examples(
                examples=[
                    "What can Team A (Coders) help me with?",
                    "Create a new objective: Analyze market trends",
                    "What's the status of all teams?",
                    "Give me insights about recent developments"
                ],
                inputs=msg,
                label="Example Queries"
            )

        return interface
|
543 |
+
|
544 |
+
def create_chat_interface() -> gr.Blocks:
    """Create Gradio chat interface.

    Builds a fresh ChatInterface backend and wires its message handler
    into the VentureUI wrapper.
    """
    return VentureUI(ChatInterface().process_message).create_interface()
|
549 |
+
|
550 |
+
# Initialize FastAPI
app = FastAPI(
    title="Advanced Agentic System",
    description="Venture Strategy Optimizer with OpenAI-compatible API",
    version="1.0.0"
)

# Add CORS middleware
# NOTE(review): wildcard origins combined with allow_credentials=True is the
# most permissive possible CORS policy — confirm this is intended before
# reusing outside a public demo Space.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include OpenAI-compatible routes.
# The reasoning engine instance created here is shared by every route in
# this module (see /api/reason and /api/venture/* below).
from api.openai_compatible import OpenAICompatibleAPI
reasoning_engine = UnifiedReasoningEngine()
openai_api = OpenAICompatibleAPI(reasoning_engine)
app.include_router(openai_api.router, tags=["OpenAI Compatible"])
|
571 |
+
|
572 |
+
# Original API routes
|
573 |
+
@app.get("/api/health")
async def health_check():
    """Health check endpoint.

    Reports liveness plus a map of the service's main entry points.
    """
    endpoint_map = {
        "openai_compatible": "/v1/chat/completions",
        "venture": "/api/venture",
        "ui": "/",
    }
    return {
        "status": "healthy",
        "version": "1.0.0",
        "endpoints": endpoint_map,
    }
|
585 |
+
|
586 |
+
@app.post("/api/reason")
async def reason(query: str, context: Optional[Dict[str, Any]] = None):
    """Reasoning endpoint.

    Delegates to the module-level UnifiedReasoningEngine; any failure is
    surfaced to the client as HTTP 500 with the error text as detail.
    """
    try:
        # `context or {}` normalizes a missing/None context to an empty dict.
        result = await reasoning_engine.reason(query, context or {})
        return result
    except Exception as e:
        logger.error(f"Reasoning error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
595 |
+
|
596 |
+
@app.post("/api/venture/analyze")
async def analyze_venture(
    venture_type: str,
    description: str,
    metrics: Optional[Dict[str, Any]] = None
):
    """Venture analysis endpoint.

    Constructs a fresh VentureAPI around the shared reasoning engine for
    each request; failures map to HTTP 500 with the error text as detail.
    """
    try:
        result = await VentureAPI(reasoning_engine).analyze_venture(
            venture_type=venture_type,
            description=description,
            metrics=metrics or {}  # normalize missing metrics to empty dict
        )
        return result
    except Exception as e:
        logger.error(f"Analysis error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
613 |
+
|
614 |
+
@app.get("/api/venture/types")
async def get_venture_types():
    """Get available venture types.

    Note: instantiates a new VentureAPI per request (same pattern as
    /api/venture/analyze); no error mapping — exceptions propagate to
    FastAPI's default 500 handler.
    """
    return VentureAPI(reasoning_engine).get_venture_types()
|
618 |
+
|
619 |
+
# Create Gradio interface
interface = create_chat_interface()

# Mount Gradio app to FastAPI at the web root; API routes above keep their
# /api and /v1 prefixes.
app = gr.mount_gradio_app(app, interface, path="/")

if __name__ == "__main__":
    # Run with uvicorn when called directly.
    # NOTE(review): uvicorn treats `reload` and `workers` as mutually
    # exclusive — with reload=True the workers setting is ignored. Drop
    # reload for production, or workers for development.
    uvicorn.run(
        "app:app",
        host="0.0.0.0",
        port=7860,
        reload=True,
        workers=4
    )
|
app.yaml
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
title: Advanced Reasoning System
|
2 |
+
emoji: 🧠
|
3 |
+
colorFrom: indigo
|
4 |
+
colorTo: purple
|
5 |
+
sdk: gradio
|
6 |
+
sdk_version: 4.16.0
|
7 |
+
app_file: app.py
|
8 |
+
pinned: false
|
9 |
+
license: mit
|
app_space.sh
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Bootstrap script for the HuggingFace Space: installs system deps,
# Python requirements, downloads models, then launches the app.

# Exit on error
set -e

echo "Starting Advanced Agentic System initialization..."

# Create necessary directories
mkdir -p /data/models
mkdir -p logs

# Install system dependencies
# NOTE(review): apt-get requires root; on HF Spaces this only works in a
# Docker-SDK Space (or at image build time) — confirm the runtime user.
apt-get update && apt-get install -y \
    git \
    git-lfs \
    cmake \
    build-essential \
    pkg-config \
    libcurl4-openssl-dev

# Initialize git-lfs
git lfs install

# Upgrade pip and install requirements
python -m pip install --upgrade pip
pip install -r requirements.txt

# Download and initialize models
echo "Initializing models..."
python download_models_space.py

# Start the application (blocks for the lifetime of the Space)
echo "Starting Gradio interface..."
python app.py
|
check_space_status.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from huggingface_hub import HfApi
|
2 |
+
import time
|
3 |
+
import os
|
4 |
+
import requests
|
5 |
+
|
6 |
+
def check_space_status():
    """Return the runtime stage of the Space, or None if it can't be determined.

    Tries the raw runtime REST endpoint first (only when HUGGINGFACE_TOKEN
    is set), then falls back to ``HfApi.space_info``. All failures are
    printed and swallowed so the polling loop below can simply retry.
    """
    api = HfApi()
    space_name = "nananie143/Agentic_llm"

    try:
        # Fix: read the token with .get() — the previous os.environ[...]
        # raised KeyError when the variable was unset, and because that
        # happened inside this try block the HfApi fallback never ran.
        token = os.environ.get("HUGGINGFACE_TOKEN")
        if token:
            # First try direct API request.
            response = requests.get(
                f"https://huggingface.co/api/spaces/{space_name}/runtime",
                headers={"Authorization": f"Bearer {token}"},
                timeout=30,  # never let a stuck request hang the polling loop
            )
            print(f"\nAPI Response Status: {response.status_code}")
            if response.ok:
                data = response.json()
                print(f"Space Info: {data}")
                return data.get("stage")

        # Fallback to HF API
        space_info = api.space_info(space_name)
        print(f"\nSpace Info via HF API: {space_info}")

        if hasattr(space_info, 'runtime'):
            status = space_info.runtime.stage
            print(f"Status: {status}")
            return status

        print("No status information available")
        return None

    except Exception as e:
        print(f"Error checking status: {e}")
        return None
|
37 |
+
|
38 |
+
print("Starting Space status check...")
print("Will check every 30 seconds until the Space is running...")

# Poll indefinitely; the loop exits only on a terminal RUNNING/FAILED stage.
# A None status (network error, missing token) just retries on the next tick.
while True:
    status = check_space_status()
    print(f"Current status: {status}")

    if status == "RUNNING":
        print("\nSpace is now running! ")
        print(f"Access your Space at: https://huggingface.co/spaces/nananie143/Agentic_llm")
        break
    elif status == "FAILED":
        print("\nSpace build failed! Please check the logs for details.")
        break
    elif status is None:
        print("\nCouldn't determine status. Will try again...")

    time.sleep(30)
|
check_versions.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
|
3 |
+
import sys
|
4 |
+
import pkg_resources
|
5 |
+
import logging
|
6 |
+
|
7 |
+
logging.basicConfig(level=logging.INFO)
|
8 |
+
logger = logging.getLogger(__name__)
|
9 |
+
|
10 |
+
def check_package_version(package_name):
    """Return the installed version string of *package_name*, or None if absent.

    Uses the stdlib ``importlib.metadata`` instead of the deprecated
    ``pkg_resources`` API (removed from recent setuptools); the return
    contract is unchanged: version string on success, None when the
    distribution is not installed.
    """
    # Local import keeps the module-level import block untouched.
    from importlib.metadata import PackageNotFoundError, version as _dist_version
    try:
        version = _dist_version(package_name)
        logger.info(f"{package_name} version: {version}")
        return version
    except PackageNotFoundError:
        logger.error(f"{package_name} is not installed")
        return None
|
19 |
+
|
20 |
+
def main():
    """Check versions of key packages.

    Exits with status 1 when a required package is missing or the installed
    Gradio is older than 4.44; returns 0 on success.
    """
    packages = [
        "gradio",
        "torch",
        "transformers",
        "huggingface-hub",
        "pydantic",
        "fastapi",
        "uvicorn"
    ]

    logger.info("Checking package versions...")
    for package in packages:
        version = check_package_version(package)
        if version is None:
            sys.exit(1)

    # Verify Gradio version specifically.
    gradio_version = check_package_version("gradio")
    if gradio_version:
        # Fix: parse only the leading numeric components. The previous
        # `major, minor, patch = map(int, v.split("."))` raised ValueError
        # on two-part versions ("4.44") and on suffixed releases
        # ("4.44.1.post1"), crashing the check it was meant to perform.
        numeric = []
        for part in gradio_version.split("."):
            if not part.isdigit():
                break
            numeric.append(int(part))
        major, minor = (numeric + [0, 0])[:2]
        if major < 4 or (major == 4 and minor < 44):
            logger.error(f"Gradio version {gradio_version} is too old. Please upgrade to 4.44.1 or later")
            sys.exit(1)

    logger.info("All package versions verified successfully")
    return 0

if __name__ == "__main__":
    sys.exit(main())
|
cleanup.sh
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Repository cleanup: removes backups, caches, and superseded modules while
# leaving everything under ./reasoning untouched.

# Exit on error
set -e

echo "Starting cleanup..."

# Define core files to keep
# NOTE(review): CORE_FILES is declared but never referenced by the commands
# below — confirm whether a keep-list guard was intended here.
CORE_FILES=(
    "agentic_system.py"
    "orchestrator.py"
    "team_management.py"
    "meta_learning.py"
    "config.py"
    "space.yml"
    "app.py"
    "startup.sh"
    "check_versions.py"
    "requirements.txt"
    "upload_to_hub.py"
    "app_space.sh"
    ".gitattributes"
)

# Remove backup and temporary files (excluding reasoning directory)
find . -type f ! -path "./reasoning/*" \( -name "*.bak*" -o -name "*.backup" -o -name "*.temp" -o -name "*.log" \) -delete

# Remove cache files (excluding reasoning directory)
find . -type d ! -path "./reasoning/*" -name "__pycache__" -exec rm -rf {} +
find . -type f ! -path "./reasoning/*" -name "*.pyc" -delete

# Remove sample and simplified files
rm -f simple_reasoning.py quick_check.py
rm -rf simple_reasoning/

# Remove environment files (after backing up if needed)
if [ -f .env ]; then
    mv .env .env.backup
fi
rm -f .env.example

# Remove quantum_learning.py since its functionality exists in reasoning/quantum.py
rm -f quantum_learning.py

echo "Cleanup complete! All files in the reasoning directory have been preserved."
|
config.py
ADDED
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
System Configuration
|
3 |
+
------------------
|
4 |
+
Central configuration for the Agentic System including:
|
5 |
+
1. Local Model Settings
|
6 |
+
2. Team Settings
|
7 |
+
3. System Parameters
|
8 |
+
4. Resource Limits
|
9 |
+
5. Free API Configurations
|
10 |
+
"""
|
11 |
+
|
12 |
+
import os
|
13 |
+
from typing import Dict, Any, Optional
|
14 |
+
from pathlib import Path
|
15 |
+
import json
|
16 |
+
import logging
|
17 |
+
from dataclasses import dataclass, field
|
18 |
+
|
19 |
+
logger = logging.getLogger(__name__)
|
20 |
+
|
21 |
+
@dataclass
class Config:
    """Configuration for the Advanced Agentic System.

    May be constructed empty (all defaults) or with a dict of overrides:
    ``Config({"min_confidence": 0.9})``. Unknown keys are ignored; values
    are validated on construction and a ValueError is raised for
    out-of-range settings.
    """

    # Core settings
    min_confidence: float = 0.7
    parallel_threshold: int = 3
    learning_rate: float = 0.1

    # Model settings (read from the environment at class-definition time)
    model_backend: str = field(default_factory=lambda: os.getenv('MODEL_BACKEND', 'huggingface'))
    groq_api_key: Optional[str] = field(default_factory=lambda: os.getenv('GROQ_API_KEY'))
    huggingface_token: Optional[str] = field(default_factory=lambda: os.getenv('HUGGINGFACE_TOKEN'))

    # API settings
    enable_openai_compatibility: bool = True
    api_rate_limit: int = 100
    api_timeout: int = 30

    # Resource limits
    max_parallel_requests: int = field(
        default_factory=lambda: int(os.getenv('MAX_PARALLEL_REQUESTS', '10'))
    )
    request_timeout: int = field(
        default_factory=lambda: int(os.getenv('REQUEST_TIMEOUT', '30'))
    )
    batch_size: int = field(
        default_factory=lambda: int(os.getenv('BATCH_SIZE', '4'))
    )

    # Cache settings
    enable_cache: bool = field(
        default_factory=lambda: os.getenv('CACHE_MODELS', 'false').lower() == 'true'
    )
    cache_dir: str = field(
        default_factory=lambda: os.getenv('SPACE_CACHE_DIR', '/tmp/models')
    )

    # Strategy weights (higher = preferred by the reasoning engine)
    strategy_weights: Dict[str, float] = field(default_factory=lambda: {
        "LOCAL_LLM": 2.0,
        "CHAIN_OF_THOUGHT": 1.5,
        "TREE_OF_THOUGHTS": 1.5,
        "META_LEARNING": 1.5,
        "TASK_DECOMPOSITION": 1.3,
        "RESOURCE_MANAGEMENT": 1.3,
        "CONTEXTUAL_PLANNING": 1.3,
        "ADAPTIVE_EXECUTION": 1.3,
        "FEEDBACK_INTEGRATION": 1.3,
        "BAYESIAN": 1.2,
        "MARKET_ANALYSIS": 1.2,
        "PORTFOLIO_OPTIMIZATION": 1.2,
        "VENTURE": 1.2,
        "MONETIZATION": 1.0,
        "MULTIMODAL": 1.0,
        "NEUROSYMBOLIC": 1.0,
        "SPECIALIZED": 1.0,
        "VENTURE_TYPE": 1.0,
        "RECURSIVE": 1.0,
        "ANALOGICAL": 1.0
    })

    # Agentic system settings
    agentic_system: Dict[str, Any] = field(default_factory=lambda: {
        "min_confidence": 0.7,
        "parallel_threshold": 3,
        "learning_rate": 0.1,
        "enable_meta_learning": True,
        "enable_self_improvement": True,
        "max_agents": 10,
        "default_agent_config": {
            "learning_rate": 0.1,
            "risk_tolerance": 0.5,
            "max_retries": 3
        }
    })

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize configuration from defaults plus an optional override dict."""
        # Fix: because a user-defined __init__ suppresses the @dataclass
        # generated one, field defaults/factories were never applied — every
        # `field(default_factory=...)` attribute stayed a dataclasses.Field
        # object and Config() crashed in _validate_config on model_backend.
        # Apply the declared defaults explicitly before any overrides.
        from dataclasses import fields, MISSING
        for f in fields(self):
            if f.default is not MISSING:
                setattr(self, f.name, f.default)
            elif f.default_factory is not MISSING:
                setattr(self, f.name, f.default_factory())

        if config:
            for key, value in config.items():
                if hasattr(self, key):
                    setattr(self, key, value)

        # Validate configuration
        self._validate_config()

    def _validate_config(self):
        """Validate configuration values; raises ValueError on bad settings."""
        if self.min_confidence < 0 or self.min_confidence > 1:
            raise ValueError("min_confidence must be between 0 and 1")

        if self.parallel_threshold < 1:
            raise ValueError("parallel_threshold must be at least 1")

        if self.learning_rate <= 0 or self.learning_rate > 1:
            raise ValueError("learning_rate must be between 0 and 1")

        if self.model_backend not in ['groq', 'huggingface']:
            raise ValueError("model_backend must be either 'groq' or 'huggingface'")

    def get(self, key: str, default: Any = None) -> Any:
        """Get configuration value, dict-style, with an optional default."""
        return getattr(self, key, default)

    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to a plain dictionary of field values."""
        return {
            key: getattr(self, key)
            for key in self.__annotations__
            if hasattr(self, key)
        }

    @classmethod
    def from_file(cls, filepath: str) -> 'Config':
        """Load configuration overrides from a JSON file."""
        path = Path(filepath)
        if not path.exists():
            raise FileNotFoundError(f"Configuration file not found: {filepath}")

        with open(filepath, 'r') as f:
            config = json.load(f)

        return cls(config)

    def save(self, filepath: str):
        """Save the current configuration to a JSON file."""
        with open(filepath, 'w') as f:
            json.dump(self.to_dict(), f, indent=2)
|
150 |
+
|
151 |
+
class SystemConfig:
|
152 |
+
"""System-wide configuration."""
|
153 |
+
|
154 |
+
# Base Paths
|
155 |
+
BASE_DIR = Path(__file__).parent.absolute()
|
156 |
+
CACHE_DIR = BASE_DIR / "cache"
|
157 |
+
LOG_DIR = BASE_DIR / "logs"
|
158 |
+
DATA_DIR = BASE_DIR / "data"
|
159 |
+
MODEL_DIR = BASE_DIR / "models"
|
160 |
+
|
161 |
+
# System Parameters
|
162 |
+
DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() == "true"
|
163 |
+
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
|
164 |
+
MAX_WORKERS = int(os.getenv("MAX_WORKERS", "4"))
|
165 |
+
ASYNC_TIMEOUT = int(os.getenv("ASYNC_TIMEOUT", "30"))
|
166 |
+
|
167 |
+
# Local Model Configurations
|
168 |
+
MODEL_CONFIG = {
|
169 |
+
"quick_coder": {
|
170 |
+
"name": "tugstugi/Qwen2.5-Coder-0.5B-QwQ-draft",
|
171 |
+
"type": "transformers",
|
172 |
+
"description": "Fast code completion and simple tasks",
|
173 |
+
"temperature": 0.2,
|
174 |
+
"max_tokens": 1000,
|
175 |
+
"timeout": 30
|
176 |
+
},
|
177 |
+
"deep_coder": {
|
178 |
+
"name": "YorkieOH10/deepseek-coder-6.7B-kexer-Q8_0-GGUF",
|
179 |
+
"type": "gguf",
|
180 |
+
"description": "Complex code generation and refactoring",
|
181 |
+
"temperature": 0.3,
|
182 |
+
"max_tokens": 2000,
|
183 |
+
"timeout": 45
|
184 |
+
},
|
185 |
+
"text_gen": {
|
186 |
+
"name": "Orenguteng/Llama-3-8B-Lexi-Uncensored",
|
187 |
+
"type": "transformers",
|
188 |
+
"description": "General text generation and reasoning",
|
189 |
+
"temperature": 0.7,
|
190 |
+
"max_tokens": 1500,
|
191 |
+
"timeout": 40
|
192 |
+
},
|
193 |
+
"workflow": {
|
194 |
+
"name": "deepseek-ai/JanusFlow-1.3B",
|
195 |
+
"type": "transformers",
|
196 |
+
"description": "Task planning and workflow management",
|
197 |
+
"temperature": 0.5,
|
198 |
+
"max_tokens": 1000,
|
199 |
+
"timeout": 30
|
200 |
+
}
|
201 |
+
}
|
202 |
+
|
203 |
+
# Team Configurations
|
204 |
+
TEAM_CONFIG = {
|
205 |
+
"coders": {
|
206 |
+
"min_agents": 3,
|
207 |
+
"max_agents": 7,
|
208 |
+
"capabilities": [
|
209 |
+
"full_stack_development",
|
210 |
+
"cloud_architecture",
|
211 |
+
"ai_ml",
|
212 |
+
"blockchain",
|
213 |
+
"mobile_development"
|
214 |
+
],
|
215 |
+
"resource_limits": {
|
216 |
+
"cpu_percent": 80,
|
217 |
+
"memory_mb": 4096,
|
218 |
+
"gpu_memory_mb": 2048
|
219 |
+
}
|
220 |
+
},
|
221 |
+
"business": {
|
222 |
+
"min_agents": 2,
|
223 |
+
"max_agents": 5,
|
224 |
+
"capabilities": [
|
225 |
+
"market_analysis",
|
226 |
+
"business_strategy",
|
227 |
+
"digital_transformation",
|
228 |
+
"startup_innovation",
|
229 |
+
"product_management"
|
230 |
+
],
|
231 |
+
"resource_limits": {
|
232 |
+
"cpu_percent": 60,
|
233 |
+
"memory_mb": 2048,
|
234 |
+
"api_calls_per_minute": 100
|
235 |
+
}
|
236 |
+
},
|
237 |
+
"research": {
|
238 |
+
"min_agents": 2,
|
239 |
+
"max_agents": 6,
|
240 |
+
"capabilities": [
|
241 |
+
"deep_research",
|
242 |
+
"data_analysis",
|
243 |
+
"trend_forecasting",
|
244 |
+
"competitive_analysis",
|
245 |
+
"technology_assessment"
|
246 |
+
],
|
247 |
+
"resource_limits": {
|
248 |
+
"cpu_percent": 70,
|
249 |
+
"memory_mb": 3072,
|
250 |
+
"api_calls_per_minute": 150
|
251 |
+
}
|
252 |
+
},
|
253 |
+
"traders": {
|
254 |
+
"min_agents": 2,
|
255 |
+
"max_agents": 5,
|
256 |
+
"capabilities": [
|
257 |
+
"crypto_trading",
|
258 |
+
"sports_betting",
|
259 |
+
"risk_management",
|
260 |
+
"market_timing",
|
261 |
+
"portfolio_optimization"
|
262 |
+
],
|
263 |
+
"resource_limits": {
|
264 |
+
"cpu_percent": 60,
|
265 |
+
"memory_mb": 2048,
|
266 |
+
"api_calls_per_minute": 200
|
267 |
+
}
|
268 |
+
}
|
269 |
+
}
|
270 |
+
|
271 |
+
# Resource Management
|
272 |
+
RESOURCE_LIMITS = {
|
273 |
+
"total_cpu_percent": 90,
|
274 |
+
"total_memory_mb": 8192,
|
275 |
+
"total_gpu_memory_mb": 4096,
|
276 |
+
"max_api_calls_per_minute": 500,
|
277 |
+
"max_concurrent_tasks": 20
|
278 |
+
}
|
279 |
+
|
280 |
+
# Collaboration Settings
|
281 |
+
COLLABORATION_CONFIG = {
|
282 |
+
"min_confidence_threshold": 0.6,
|
283 |
+
"max_team_size": 10,
|
284 |
+
"max_concurrent_objectives": 5,
|
285 |
+
"objective_timeout_minutes": 60,
|
286 |
+
"team_sync_interval_seconds": 30
|
287 |
+
}
|
288 |
+
|
289 |
+
# Error Recovery
|
290 |
+
ERROR_RECOVERY = {
|
291 |
+
"max_retries": 3,
|
292 |
+
"retry_delay_seconds": 5,
|
293 |
+
"error_threshold": 0.2,
|
294 |
+
"recovery_timeout": 300
|
295 |
+
}
|
296 |
+
|
297 |
+
# Monitoring
|
298 |
+
MONITORING = {
|
299 |
+
"metrics_interval_seconds": 60,
|
300 |
+
"health_check_interval": 30,
|
301 |
+
"performance_log_retention_days": 7,
|
302 |
+
"alert_threshold": {
|
303 |
+
"cpu": 85,
|
304 |
+
"memory": 90,
|
305 |
+
"error_rate": 0.1
|
306 |
+
}
|
307 |
+
}
|
308 |
+
|
309 |
+
# Free API Configurations (No API Keys Required)
|
310 |
+
API_CONFIG = {
|
311 |
+
"search": {
|
312 |
+
"duckduckgo": {
|
313 |
+
"base_url": "https://api.duckduckgo.com",
|
314 |
+
"rate_limit": 100,
|
315 |
+
"requires_auth": False,
|
316 |
+
"method": "GET"
|
317 |
+
},
|
318 |
+
"wikipedia": {
|
319 |
+
"base_url": "https://en.wikipedia.org/w/api.php",
|
320 |
+
"rate_limit": 200,
|
321 |
+
"requires_auth": False,
|
322 |
+
"method": "GET"
|
323 |
+
},
|
324 |
+
"arxiv": {
|
325 |
+
"base_url": "http://export.arxiv.org/api/query",
|
326 |
+
"rate_limit": 60,
|
327 |
+
"requires_auth": False,
|
328 |
+
"method": "GET"
|
329 |
+
},
|
330 |
+
"crossref": {
|
331 |
+
"base_url": "https://api.crossref.org/works",
|
332 |
+
"rate_limit": 50,
|
333 |
+
"requires_auth": False,
|
334 |
+
"method": "GET"
|
335 |
+
},
|
336 |
+
"unpaywall": {
|
337 |
+
"base_url": "https://api.unpaywall.org/v2",
|
338 |
+
"rate_limit": 100,
|
339 |
+
"requires_auth": False,
|
340 |
+
"method": "GET"
|
341 |
+
}
|
342 |
+
},
|
343 |
+
"crypto": {
|
344 |
+
"coincap": {
|
345 |
+
"base_url": "https://api.coincap.io/v2",
|
346 |
+
"rate_limit": 200,
|
347 |
+
"requires_auth": False,
|
348 |
+
"method": "GET",
|
349 |
+
"endpoints": {
|
350 |
+
"assets": "/assets",
|
351 |
+
"rates": "/rates",
|
352 |
+
"markets": "/markets"
|
353 |
+
}
|
354 |
+
},
|
355 |
+
"blockchair": {
|
356 |
+
"base_url": "https://api.blockchair.com",
|
357 |
+
"rate_limit": 30,
|
358 |
+
"requires_auth": False,
|
359 |
+
"method": "GET"
|
360 |
+
}
|
361 |
+
},
|
362 |
+
"news": {
|
363 |
+
"wikinews": {
|
364 |
+
"base_url": "https://en.wikinews.org/w/api.php",
|
365 |
+
"rate_limit": 200,
|
366 |
+
"requires_auth": False,
|
367 |
+
"method": "GET"
|
368 |
+
},
|
369 |
+
"reddit": {
|
370 |
+
"base_url": "https://www.reddit.com/r/news/.json",
|
371 |
+
"rate_limit": 60,
|
372 |
+
"requires_auth": False,
|
373 |
+
"method": "GET"
|
374 |
+
},
|
375 |
+
"hackernews": {
|
376 |
+
"base_url": "https://hacker-news.firebaseio.com/v0",
|
377 |
+
"rate_limit": 100,
|
378 |
+
"requires_auth": False,
|
379 |
+
"method": "GET"
|
380 |
+
}
|
381 |
+
},
|
382 |
+
"market_data": {
|
383 |
+
"yahoo_finance": {
|
384 |
+
"base_url": "https://query1.finance.yahoo.com/v8/finance",
|
385 |
+
"rate_limit": 100,
|
386 |
+
"requires_auth": False,
|
387 |
+
"method": "GET"
|
388 |
+
},
|
389 |
+
"marketstack_free": {
|
390 |
+
"base_url": "https://api.marketstack.com/v1",
|
391 |
+
"rate_limit": 100,
|
392 |
+
"requires_auth": False,
|
393 |
+
"method": "GET"
|
394 |
+
}
|
395 |
+
},
|
396 |
+
"sports": {
|
397 |
+
"football_data": {
|
398 |
+
"base_url": "https://www.football-data.org/v4",
|
399 |
+
"rate_limit": 10,
|
400 |
+
"requires_auth": False,
|
401 |
+
"method": "GET",
|
402 |
+
"free_endpoints": [
|
403 |
+
"/competitions",
|
404 |
+
"/matches"
|
405 |
+
]
|
406 |
+
},
|
407 |
+
"nhl": {
|
408 |
+
"base_url": "https://statsapi.web.nhl.com/api/v1",
|
409 |
+
"rate_limit": 50,
|
410 |
+
"requires_auth": False,
|
411 |
+
"method": "GET"
|
412 |
+
},
|
413 |
+
"mlb": {
|
414 |
+
"base_url": "https://statsapi.mlb.com/api/v1",
|
415 |
+
"rate_limit": 50,
|
416 |
+
"requires_auth": False,
|
417 |
+
"method": "GET"
|
418 |
+
}
|
419 |
+
},
|
420 |
+
"web_scraping": {
|
421 |
+
"web_archive": {
|
422 |
+
"base_url": "https://archive.org/wayback/available",
|
423 |
+
"rate_limit": 40,
|
424 |
+
"requires_auth": False,
|
425 |
+
"method": "GET"
|
426 |
+
},
|
427 |
+
"metahtml": {
|
428 |
+
"base_url": "https://html.spec.whatwg.org/multipage",
|
429 |
+
"rate_limit": 30,
|
430 |
+
"requires_auth": False,
|
431 |
+
"method": "GET"
|
432 |
+
}
|
433 |
+
}
|
434 |
+
}
|
435 |
+
|
436 |
+
    @classmethod
    def get_team_config(cls, team_name: str) -> Dict[str, Any]:
        """Get configuration for a specific team.

        Returns an empty dict for unknown team names instead of raising.
        """
        return cls.TEAM_CONFIG.get(team_name, {})

    @classmethod
    def get_model_config(cls, model_type: str) -> Dict[str, Any]:
        """Get configuration for a specific model type.

        Returns an empty dict for unknown model types instead of raising.
        """
        return cls.MODEL_CONFIG.get(model_type, {})

    @classmethod
    def get_api_config(cls, api_name: str) -> Dict[str, Any]:
        """Get configuration for a specific API.

        Searches every category of API_CONFIG and returns the first match;
        returns an empty dict when the API is not configured anywhere.
        """
        for category in cls.API_CONFIG.values():
            if api_name in category:
                return category[api_name]
        return {}
|
download_models.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Script to download and prepare models for HuggingFace Spaces."""
|
2 |
+
|
3 |
+
import os
|
4 |
+
import asyncio
|
5 |
+
import logging
|
6 |
+
from pathlib import Path
|
7 |
+
from huggingface_hub import HfApi, upload_file
|
8 |
+
from reasoning.model_manager import ModelManager
|
9 |
+
|
10 |
+
# Configure logging
|
11 |
+
logging.basicConfig(level=logging.INFO)
|
12 |
+
logger = logging.getLogger(__name__)
|
13 |
+
|
14 |
+
async def download_and_prepare_models():
    """Download all models and prepare for Spaces.

    Returns True on success, False on any failure. Errors are logged and
    swallowed, never raised, so callers can treat this as best-effort.
    """
    try:
        # Initialize model manager rooted at ./models in the CWD.
        model_dir = os.path.join(os.getcwd(), "models")
        manager = ModelManager(model_dir)

        # Create models directory (no-op if it already exists)
        os.makedirs(model_dir, exist_ok=True)

        # Download all models
        logger.info("Starting model downloads...")
        await manager.initialize_all_models()
        logger.info("All models downloaded successfully!")

        return True

    except Exception as e:
        logger.error(f"Error downloading models: {e}")
        return False
|
34 |
+
|
35 |
+
def upload_to_spaces(space_name: str = "agentic-system-models"):
    """Upload downloaded ``*.gguf`` model files to a HuggingFace Space.

    Writes a ``.gitattributes`` marking ``*.gguf`` as Git-LFS content,
    uploads it first, then uploads each model file under ``models/``.

    Returns:
        True on success, False if any upload step raised (error is logged).
    """
    try:
        api = HfApi()
        model_dir = os.path.join(os.getcwd(), "models")

        # Create .gitattributes for LFS so the Space repo stores the large
        # model binaries through Git-LFS instead of plain git objects.
        gitattributes_path = os.path.join(model_dir, ".gitattributes")
        with open(gitattributes_path, "w") as f:
            f.write("*.gguf filter=lfs diff=lfs merge=lfs -text")

        # Upload .gitattributes first so LFS rules are in place before the
        # model blobs arrive.
        # NOTE(review): repo_id is "spaces/<name>" while repo_type="space" is
        # also passed — huggingface_hub expects repo_id to be
        # "<namespace>/<name>" with repo_type selecting the repo kind; the
        # "spaces/" prefix looks redundant/wrong. TODO confirm against the
        # HfApi documentation for the pinned huggingface_hub version.
        api.upload_file(
            path_or_fileobj=gitattributes_path,
            path_in_repo=".gitattributes",
            repo_id=f"spaces/{space_name}",
            repo_type="space"
        )

        # Upload each model file individually.
        for model_file in Path(model_dir).glob("*.gguf"):
            logger.info(f"Uploading {model_file.name}...")
            api.upload_file(
                path_or_fileobj=str(model_file),
                path_in_repo=f"models/{model_file.name}",
                repo_id=f"spaces/{space_name}",
                repo_type="space"
            )

        logger.info("All models uploaded to Spaces successfully!")
        return True

    except Exception as e:
        logger.error(f"Error uploading to Spaces: {e}")
        return False
|
70 |
+
|
71 |
+
if __name__ == "__main__":
    # Download the models first and only push them to the Space when every
    # download succeeded — previously the upload ran unconditionally, which
    # could publish an incomplete model set after a failed download.
    if asyncio.run(download_and_prepare_models()):
        upload_to_spaces()
    else:
        logger.error("Model download failed; skipping upload to Spaces.")
|
download_models_space.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Initialize and download models in Hugging Face Spaces environment."""
|
2 |
+
|
3 |
+
import os
|
4 |
+
import asyncio
|
5 |
+
import logging
|
6 |
+
from huggingface_hub import HfApi
|
7 |
+
from reasoning.model_manager import ModelManager
|
8 |
+
|
9 |
+
# Configure logging
|
10 |
+
logging.basicConfig(level=logging.INFO)
|
11 |
+
logger = logging.getLogger(__name__)
|
12 |
+
|
13 |
+
async def initialize_space_models():
    """Download and initialize models in Spaces environment.

    Returns:
        True when initialization completed, False if it raised
        (the failure is logged, not re-raised).
    """
    try:
        model_manager = ModelManager()

        logger.info("Starting model downloads in Spaces environment...")
        await model_manager.initialize_all_models()
        logger.info("All models downloaded and initialized successfully!")
    except Exception as exc:
        logger.error(f"Error initializing models in Spaces: {exc}")
        return False
    return True
|
29 |
+
|
30 |
+
if __name__ == "__main__":
    # Surface failure through the process exit code so the Space build /
    # startup can detect a broken model initialization — previously the
    # script always exited 0 even when initialization failed.
    import sys

    success = asyncio.run(initialize_space_models())
    sys.exit(0 if success else 1)
|
fix_indentation.patch
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
--- reasoning.py
|
2 |
+
+++ reasoning.py
|
3 |
+
@@ -2796,297 +2796,297 @@
|
4 |
+
async def _cross_modal_alignment(self, modalities: Dict[str, List[Dict[str, Any]]], context: Dict[str, Any]) -> List[Dict[str, Any]]:
|
5 |
+
"""Align information across different modalities."""
|
6 |
+
try:
|
7 |
+
# Extract modality types
|
8 |
+
modal_types = list(modalities.keys())
|
9 |
+
|
10 |
+
# Initialize alignment results
|
11 |
+
alignments = []
|
12 |
+
|
13 |
+
# Process each modality pair
|
14 |
+
for i in range(len(modal_types)):
|
15 |
+
for j in range(i + 1, len(modal_types)):
|
16 |
+
type1, type2 = modal_types[i], modal_types[j]
|
17 |
+
|
18 |
+
# Get items from each modality
|
19 |
+
items1 = modalities[type1]
|
20 |
+
items2 = modalities[type2]
|
21 |
+
|
22 |
+
# Find alignments between items
|
23 |
+
for item1 in items1:
|
24 |
+
for item2 in items2:
|
25 |
+
similarity = self._calculate_similarity(item1, item2)
|
26 |
+
if similarity > 0.5: # Threshold for alignment
|
27 |
+
alignments.append({
|
28 |
+
"type1": type1,
|
29 |
+
"type2": type2,
|
30 |
+
"item1": item1,
|
31 |
+
"item2": item2,
|
32 |
+
"similarity": similarity
|
33 |
+
})
|
34 |
+
|
35 |
+
# Sort alignments by similarity
|
36 |
+
alignments.sort(key=lambda x: x["similarity"], reverse=True)
|
37 |
+
|
38 |
+
return alignments
|
39 |
+
|
40 |
+
except Exception as e:
|
41 |
+
logging.error(f"Error in cross-modal alignment: {str(e)}")
|
42 |
+
return []
|
init_space.sh
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Bootstrap script for the Hugging Face Space: prepare directories,
# install system and Python dependencies, download models, start the app.
#
# Abort on the first failing command, on use of unset variables, and on
# failures inside pipelines — previously the script kept going after a
# failed apt-get/pip/download step and launched a broken app.
set -euo pipefail

# Create necessary directories
mkdir -p models
mkdir -p logs

# Install system dependencies (build toolchain is needed for native wheels)
apt-get update && apt-get install -y \
    git \
    git-lfs \
    python3-pip \
    python3-dev \
    build-essential \
    cmake \
    pkg-config \
    libcurl4-openssl-dev

# Initialize git-lfs so large model files can be pulled/pushed
git lfs install

# Install Python dependencies
pip install -r requirements.txt

# Download models before the app starts serving
python download_models.py

# Start the application
python app.py
|
main.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Main entry point for the Advanced Agentic System."""
|
2 |
+
|
3 |
+
import asyncio
|
4 |
+
import logging
|
5 |
+
from typing import Dict, Any, Optional
|
6 |
+
from fastapi import FastAPI
|
7 |
+
from fastapi.middleware.cors import CORSMiddleware
|
8 |
+
import gradio as gr
|
9 |
+
import uvicorn
|
10 |
+
|
11 |
+
from agentic_system import AgenticSystem
|
12 |
+
from reasoning.unified_engine import UnifiedReasoningEngine
|
13 |
+
from api.openai_compatible import OpenAICompatibleAPI
|
14 |
+
from api.venture_api import VentureAPI
|
15 |
+
from ui.venture_ui import VentureUI
|
16 |
+
from config import Config
|
17 |
+
|
18 |
+
# Configure logging
|
19 |
+
logging.basicConfig(level=logging.INFO)
|
20 |
+
logger = logging.getLogger(__name__)
|
21 |
+
|
22 |
+
class AgenticSystemApp:
    """Main application class integrating all components.

    Wires together the reasoning engine, the agentic system, the two API
    surfaces (OpenAI-compatible + venture) and the Gradio UI, all mounted
    on a single FastAPI instance.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Build the full application from an optional config dict."""
        self.config = Config(config)

        # Initialize core components
        self.reasoning_engine = UnifiedReasoningEngine(
            min_confidence=self.config.get('min_confidence', 0.7),
            parallel_threshold=self.config.get('parallel_threshold', 3),
            learning_rate=self.config.get('learning_rate', 0.1)
        )

        self.agentic_system = AgenticSystem(
            config=self.config.get('agentic_system', {})
        )

        # Initialize APIs (both share the same reasoning engine instance)
        self.venture_api = VentureAPI(self.reasoning_engine)
        self.openai_api = OpenAICompatibleAPI(self.reasoning_engine)

        # Initialize FastAPI
        self.app = FastAPI(
            title="Advanced Agentic System",
            description="Venture Strategy Optimizer with OpenAI-compatible API",
            version="1.0.0"
        )

        # Setup middleware
        self._setup_middleware()

        # Setup routes
        self._setup_routes()

        # Initialize UI and mount the Gradio interface at the web root
        self.ui = VentureUI(self.venture_api)
        self.interface = self.ui.create_interface()
        self.app = gr.mount_gradio_app(self.app, self.interface, path="/")

    def _setup_middleware(self):
        """Setup FastAPI middleware (permissive CORS for the public Space)."""
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

    def _setup_routes(self):
        """Register API routes on the FastAPI app."""
        # Include OpenAI-compatible routes
        self.app.include_router(
            self.openai_api.router,
            tags=["OpenAI Compatible"]
        )

        # Health check
        @self.app.get("/api/health")
        async def health_check():
            return {
                "status": "healthy",
                "version": "1.0.0",
                "components": {
                    "reasoning_engine": "active",
                    "agentic_system": "active",
                    "openai_api": "active",
                    "venture_api": "active"
                }
            }

        # System status
        @self.app.get("/api/system/status")
        async def system_status():
            return await self.agentic_system.get_system_status()

        # Reasoning endpoint
        @self.app.post("/api/reason")
        async def reason(query: str, context: Optional[Dict[str, Any]] = None):
            return await self.reasoning_engine.reason(query, context or {})

        # Venture analysis
        @self.app.post("/api/venture/analyze")
        async def analyze_venture(
            venture_type: str,
            description: str,
            metrics: Optional[Dict[str, Any]] = None
        ):
            return await self.venture_api.analyze_venture(
                venture_type=venture_type,
                description=description,
                metrics=metrics or {}
            )

    def run(self, host: str = "0.0.0.0", port: int = 7860):
        """Run the application with uvicorn.

        FIX: the previous code passed ``workers=4`` together with an app
        *object*; uvicorn only supports multiple workers when the app is
        given as an import string, so the option was ignored/warned at
        startup. Run single-process explicitly instead.
        """
        uvicorn.run(
            self.app,
            host=host,
            port=port
        )
|
126 |
+
|
127 |
+
def main():
    """Main entry point: build the application and serve it."""
    application = AgenticSystemApp()
    application.run()


if __name__ == "__main__":
    main()
|
meta_learning.py
ADDED
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Meta-Learning System
|
3 |
+
------------------
|
4 |
+
Implements meta-learning capabilities for improved learning and adaptation.
|
5 |
+
"""
|
6 |
+
|
7 |
+
from typing import Dict, Any, List, Optional, Tuple
|
8 |
+
import numpy as np
|
9 |
+
from dataclasses import dataclass, field
|
10 |
+
import logging
|
11 |
+
from datetime import datetime
|
12 |
+
from enum import Enum
|
13 |
+
import json
|
14 |
+
from quantum_learning import QuantumLearningSystem, Pattern, PatternType
|
15 |
+
|
16 |
+
class LearningStrategy(Enum):
    """Learning-strategy families the meta-learner can switch between."""
    GRADIENT_BASED = "gradient_based"  # gradient-descent style updates
    MEMORY_BASED = "memory_based"      # memory/replay-based learning
    EVOLUTIONARY = "evolutionary"      # population/evolution-based search
    REINFORCEMENT = "reinforcement"    # reward-driven learning
    QUANTUM = "quantum"                # delegates to the QuantumLearningSystem
|
22 |
+
|
23 |
+
@dataclass
class MetaParameters:
    """Meta-parameters for learning strategies.

    All fields are adapted at runtime by ``MetaLearningSystem`` and kept
    within bounds by ``_normalize_parameters``.
    """
    learning_rate: float = 0.01        # step size; clipped to [0.001, 0.1]
    memory_size: int = 1000            # max performance-history entries; clipped to [100, 10000]
    evolution_rate: float = 0.1        # variation rate for evolutionary search; clipped to [0.01, 0.5]
    exploration_rate: float = 0.2      # chance of an exploration bonus in strategy scoring; clipped to [0.1, 0.9]
    quantum_interference: float = 0.5  # quantum interference weighting; clipped to [0.1, 0.9]
    adaptation_threshold: float = 0.7  # accuracy/convergence level that triggers adaptation
|
33 |
+
@dataclass
class LearningMetrics:
    """Metrics for learning performance at one optimization step."""
    accuracy: float          # mean pattern confidence (0 when no patterns)
    convergence_rate: float  # fraction of recent steps that improved accuracy
    adaptation_speed: float  # 1/(steps to reach the adaptation threshold), 0 if never reached
    resource_usage: float    # per-strategy cost estimate in [0, 1]
    # ISO-8601 creation time, filled in automatically.
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
|
41 |
+
|
42 |
+
class MetaLearningSystem:
    """Meta-learning system for optimizing learning strategies.

    Wraps a QuantumLearningSystem and, per observation, evaluates the
    current strategy, adapts its own meta-parameters, selects the
    best-scoring strategy and applies it.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict (missing keys get defaults)."""
        self.logger = logging.getLogger(__name__)
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Initialize quantum system with shared config so both systems use
        # the same confidence/learning settings.
        quantum_config = {
            'min_confidence': self.min_confidence,
            'parallel_threshold': self.parallel_threshold,
            'learning_rate': self.learning_rate,
            'strategy_weights': self.strategy_weights,
            'num_qubits': self.config.get('num_qubits', 8),
            'entanglement_strength': self.config.get('entanglement_strength', 0.5),
            'interference_threshold': self.config.get('interference_threshold', 0.3),
            'tunneling_rate': self.config.get('tunneling_rate', 0.1),
            'annealing_schedule': self.config.get('annealing_schedule', {
                'initial_temp': 1.0,
                'final_temp': 0.01,
                'steps': 100,
                'cooling_rate': 0.95
            })
        }
        self.quantum_system = QuantumLearningSystem(quantum_config)
        self.strategies = {}               # registry of strategies (currently unused here)
        self.performance_history = []      # list[LearningMetrics], bounded by memory_size
        self.meta_parameters = MetaParameters()

    async def optimize_learning(
        self,
        observation: Dict[str, Any],
        current_strategy: LearningStrategy
    ) -> Tuple[Dict[str, Any], LearningMetrics]:
        """Optimize learning strategy based on observation.

        Returns the result of applying the selected strategy together with
        the metrics computed for *current_strategy*. Re-raises any failure
        after logging.
        """
        try:
            # Process with quantum system
            quantum_result = await self.quantum_system.process_observation(observation)

            # Evaluate current strategy
            current_metrics = self._evaluate_strategy(
                current_strategy,
                observation,
                quantum_result
            )

            # Update performance history
            self._update_performance_history(current_metrics)

            # Adapt meta-parameters
            self._adapt_meta_parameters(current_metrics)

            # Select optimal strategy
            optimal_strategy = self._select_optimal_strategy(
                observation,
                current_metrics
            )

            # Apply selected strategy
            result = await self._apply_strategy(
                optimal_strategy,
                observation,
                quantum_result
            )

            return result, current_metrics

        except Exception as e:
            self.logger.error(f"Failed to optimize learning: {str(e)}")
            raise

    def _evaluate_strategy(
        self,
        strategy: LearningStrategy,
        observation: Dict[str, Any],
        quantum_result: Dict[str, Any]
    ) -> LearningMetrics:
        """Evaluate performance of current learning strategy."""
        # Calculate accuracy
        accuracy = self._calculate_accuracy(
            strategy,
            observation,
            quantum_result
        )

        # Calculate convergence rate
        convergence_rate = self._calculate_convergence_rate(
            strategy,
            self.performance_history
        )

        # Calculate adaptation speed
        adaptation_speed = self._calculate_adaptation_speed(
            strategy,
            observation
        )

        # Calculate resource usage
        resource_usage = self._calculate_resource_usage(strategy)

        return LearningMetrics(
            accuracy=accuracy,
            convergence_rate=convergence_rate,
            adaptation_speed=adaptation_speed,
            resource_usage=resource_usage
        )

    def _update_performance_history(
        self,
        metrics: LearningMetrics
    ) -> None:
        """Append new metrics, evicting the oldest entry past memory_size."""
        self.performance_history.append(metrics)

        # Maintain history size
        if len(self.performance_history) > self.meta_parameters.memory_size:
            self.performance_history.pop(0)

    def _adapt_meta_parameters(
        self,
        metrics: LearningMetrics
    ) -> None:
        """Adapt meta-parameters based on performance metrics.

        Each parameter is nudged multiplicatively (×0.9 / ×1.1) depending on
        whether its associated metric is above or below threshold, then
        clipped to bounds by ``_normalize_parameters``.
        """
        # Adjust learning rate
        if metrics.convergence_rate < self.meta_parameters.adaptation_threshold:
            self.meta_parameters.learning_rate *= 0.9
        else:
            self.meta_parameters.learning_rate *= 1.1

        # Adjust memory size (shrink when resources are tight, grow when idle)
        if metrics.resource_usage > 0.8:
            self.meta_parameters.memory_size = int(
                self.meta_parameters.memory_size * 0.9
            )
        elif metrics.resource_usage < 0.2:
            self.meta_parameters.memory_size = int(
                self.meta_parameters.memory_size * 1.1
            )

        # Adjust evolution rate (more variation when adapting slowly)
        if metrics.adaptation_speed < self.meta_parameters.adaptation_threshold:
            self.meta_parameters.evolution_rate *= 1.1
        else:
            self.meta_parameters.evolution_rate *= 0.9

        # Adjust exploration rate (explore more when accuracy is low)
        if metrics.accuracy < self.meta_parameters.adaptation_threshold:
            self.meta_parameters.exploration_rate *= 1.1
        else:
            self.meta_parameters.exploration_rate *= 0.9

        # Adjust quantum interference
        if metrics.accuracy > 0.8:
            self.meta_parameters.quantum_interference *= 1.1
        else:
            self.meta_parameters.quantum_interference *= 0.9

        # Ensure parameters stay within reasonable bounds
        self._normalize_parameters()

    def _normalize_parameters(self) -> None:
        """Clip all meta-parameters to their allowed ranges."""
        self.meta_parameters.learning_rate = np.clip(
            self.meta_parameters.learning_rate,
            0.001,
            0.1
        )
        self.meta_parameters.memory_size = np.clip(
            self.meta_parameters.memory_size,
            100,
            10000
        )
        self.meta_parameters.evolution_rate = np.clip(
            self.meta_parameters.evolution_rate,
            0.01,
            0.5
        )
        self.meta_parameters.exploration_rate = np.clip(
            self.meta_parameters.exploration_rate,
            0.1,
            0.9
        )
        self.meta_parameters.quantum_interference = np.clip(
            self.meta_parameters.quantum_interference,
            0.1,
            0.9
        )

    def _select_optimal_strategy(
        self,
        observation: Dict[str, Any],
        metrics: LearningMetrics
    ) -> LearningStrategy:
        """Score every LearningStrategy and return the highest-scoring one."""
        strategies = list(LearningStrategy)
        scores = []

        for strategy in strategies:
            # Calculate strategy score
            score = self._calculate_strategy_score(
                strategy,
                observation,
                metrics
            )
            scores.append((strategy, score))

        # Select strategy with highest score
        optimal_strategy = max(scores, key=lambda x: x[1])[0]

        return optimal_strategy

    async def _apply_strategy(
        self,
        strategy: LearningStrategy,
        observation: Dict[str, Any],
        quantum_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Dispatch to the per-strategy apply method (QUANTUM passes through)."""
        if strategy == LearningStrategy.GRADIENT_BASED:
            return await self._apply_gradient_strategy(
                observation,
                quantum_result
            )
        elif strategy == LearningStrategy.MEMORY_BASED:
            return await self._apply_memory_strategy(
                observation,
                quantum_result
            )
        elif strategy == LearningStrategy.EVOLUTIONARY:
            return await self._apply_evolutionary_strategy(
                observation,
                quantum_result
            )
        elif strategy == LearningStrategy.REINFORCEMENT:
            return await self._apply_reinforcement_strategy(
                observation,
                quantum_result
            )
        else:  # QUANTUM
            return quantum_result

    def _calculate_accuracy(
        self,
        strategy: LearningStrategy,
        observation: Dict[str, Any],
        quantum_result: Dict[str, Any]
    ) -> float:
        """Mean confidence of patterns in quantum_result (0.0 if none)."""
        if "patterns" not in quantum_result:
            return 0.0

        patterns = quantum_result["patterns"]
        if not patterns:
            return 0.0

        # Calculate pattern confidence
        confidence_sum = sum(pattern.confidence for pattern in patterns)
        return confidence_sum / len(patterns)

    def _calculate_convergence_rate(
        self,
        strategy: LearningStrategy,
        history: List[LearningMetrics]
    ) -> float:
        """Fraction of the last 10 accuracy steps that improved.

        Note: *strategy* is currently unused; the rate is computed from the
        shared history regardless of strategy.
        """
        if not history:
            return 0.0

        # Calculate rate of improvement
        accuracies = [metrics.accuracy for metrics in history[-10:]]
        if len(accuracies) < 2:
            return 0.0

        differences = np.diff(accuracies)
        return float(np.mean(differences > 0))

    def _calculate_adaptation_speed(
        self,
        strategy: LearningStrategy,
        observation: Dict[str, Any]
    ) -> float:
        """1/(index+1) of the first history entry reaching the threshold, else 0."""
        if not self.performance_history:
            return 0.0

        # Calculate time to reach adaptation threshold
        threshold = self.meta_parameters.adaptation_threshold
        for i, metrics in enumerate(self.performance_history):
            if metrics.accuracy >= threshold:
                return 1.0 / (i + 1)

        return 0.0

    def _calculate_resource_usage(
        self,
        strategy: LearningStrategy
    ) -> float:
        """Fixed per-strategy cost estimate (not measured, simulated)."""
        # Simulate resource usage based on strategy
        base_usage = {
            LearningStrategy.GRADIENT_BASED: 0.4,
            LearningStrategy.MEMORY_BASED: 0.6,
            LearningStrategy.EVOLUTIONARY: 0.7,
            LearningStrategy.REINFORCEMENT: 0.5,
            LearningStrategy.QUANTUM: 0.8
        }

        return base_usage[strategy]

    def _calculate_strategy_score(
        self,
        strategy: LearningStrategy,
        observation: Dict[str, Any],
        metrics: LearningMetrics
    ) -> float:
        """Weighted score of the metrics plus a random exploration bonus.

        Non-deterministic: with probability ``exploration_rate`` a 0.1 bonus
        is added, so repeated calls can rank strategies differently.
        """
        # Weight different factors
        weights = {
            "accuracy": 0.4,
            "convergence": 0.2,
            "adaptation": 0.2,
            "resources": 0.2
        }

        score = (
            weights["accuracy"] * metrics.accuracy +
            weights["convergence"] * metrics.convergence_rate +
            weights["adaptation"] * metrics.adaptation_speed +
            weights["resources"] * (1 - metrics.resource_usage)
        )

        # Add exploration bonus
        if np.random.random() < self.meta_parameters.exploration_rate:
            score += 0.1

        return score

    async def _apply_gradient_strategy(
        self,
        observation: Dict[str, Any],
        quantum_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Apply gradient-based learning strategy (placeholder result dict)."""
        return {
            "result": "gradient_optimization",
            "quantum_enhanced": quantum_result,
            "meta_parameters": self.meta_parameters.__dict__
        }

    async def _apply_memory_strategy(
        self,
        observation: Dict[str, Any],
        quantum_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Apply memory-based learning strategy (placeholder result dict)."""
        return {
            "result": "memory_optimization",
            "quantum_enhanced": quantum_result,
            "meta_parameters": self.meta_parameters.__dict__
        }

    async def _apply_evolutionary_strategy(
        self,
        observation: Dict[str, Any],
        quantum_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Apply evolutionary learning strategy (placeholder result dict)."""
        return {
            "result": "evolutionary_optimization",
            "quantum_enhanced": quantum_result,
            "meta_parameters": self.meta_parameters.__dict__
        }

    async def _apply_reinforcement_strategy(
        self,
        observation: Dict[str, Any],
        quantum_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Apply reinforcement learning strategy (placeholder result dict)."""
        return {
            "result": "reinforcement_optimization",
            "quantum_enhanced": quantum_result,
            "meta_parameters": self.meta_parameters.__dict__
        }
|
multimodal_reasoning.py
ADDED
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Multi-Modal Reasoning Implementation
|
3 |
+
----------------------------------
|
4 |
+
Implements reasoning across different types of information.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import logging
|
8 |
+
from typing import Dict, Any, List, Optional
|
9 |
+
from datetime import datetime
|
10 |
+
import json
|
11 |
+
import numpy as np
|
12 |
+
from .reasoning import ReasoningStrategy
|
13 |
+
|
14 |
+
class MultiModalReasoning(ReasoningStrategy):
|
15 |
+
"""Implements multi-modal reasoning across different types of information."""
|
16 |
+
|
17 |
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
    """Initialize multi-modal reasoning.

    Args:
        config: Optional settings dict; every key has a default.
    """
    super().__init__()
    self.config = config or {}

    # Standard reasoning parameters (shared across reasoning strategies)
    self.min_confidence = self.config.get('min_confidence', 0.7)
    self.parallel_threshold = self.config.get('parallel_threshold', 3)
    self.learning_rate = self.config.get('learning_rate', 0.1)
    self.strategy_weights = self.config.get('strategy_weights', {
        "LOCAL_LLM": 0.8,
        "CHAIN_OF_THOUGHT": 0.6,
        "TREE_OF_THOUGHTS": 0.5,
        "META_LEARNING": 0.4
    })

    # Multi-modal specific parameters: relative trust per modality and
    # how cross-modal integration behaves.
    self.modality_weights = self.config.get('modality_weights', {
        'text': 0.8,
        'image': 0.7,
        'audio': 0.6,
        'video': 0.5,
        'structured': 0.7
    })
    # Minimum similarity for two items from different modalities to count
    # as aligned (used by _cross_modal_alignment).
    self.cross_modal_threshold = self.config.get('cross_modal_threshold', 0.6)
    self.integration_steps = self.config.get('integration_steps', 3)
    self.alignment_method = self.config.get('alignment_method', 'attention')
|
44 |
+
|
45 |
+
async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
    """Run the multi-modal pipeline: extract -> align -> integrate -> respond.

    Returns a dict with ``success`` plus the intermediate artifacts, or
    ``{"success": False, "error": ...}`` when any stage raises.
    """
    try:
        # Stage 1: split the query/context into per-modality items.
        modalities = await self._process_modalities(query, context)

        # Stage 2: find correspondences between items of different modalities.
        alignment = await self._cross_modal_alignment(modalities, context)

        # Stage 3: fuse the aligned information.
        integration = await self._integrated_analysis(alignment, context)

        # Stage 4: turn the integrated view into a final answer.
        response = await self._generate_response(integration, context)

        result = {
            "success": True,
            "answer": response["conclusion"],
            "modalities": modalities,
            "alignment": alignment,
            "integration": integration,
            "confidence": response["confidence"],
        }
        return result
    except Exception as e:
        logging.error(f"Error in multi-modal reasoning: {str(e)}")
        return {"success": False, "error": str(e)}
|
70 |
+
|
71 |
+
async def _process_modalities(self, query: str, context: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]:
    """Process query across different modalities.

    Builds an extraction prompt, sends it to the Groq API taken from
    ``context["groq_api"]`` (KeyError if absent), and parses the model's
    answer into a mapping of modality type -> list of item dicts.
    """
    # NOTE(review): the exact leading whitespace inside this f-string is
    # part of the prompt sent to the model — do not reflow it casually.
    prompt = f"""
    Process query across modalities:
    Query: {query}
    Context: {json.dumps(context)}

    For each modality extract:
    1. [Type]: Modality type
    2. [Content]: Relevant content
    3. [Features]: Key features
    4. [Quality]: Content quality

    Format as:
    [M1]
    Type: ...
    Content: ...
    Features: ...
    Quality: ...
    """

    response = await context["groq_api"].predict(prompt)
    # _parse_modalities converts the raw text answer into structured items.
    return self._parse_modalities(response["answer"])
|
94 |
+
|
95 |
+
async def _cross_modal_alignment(self, modalities: Dict[str, List[Dict[str, Any]]], context: Dict[str, Any]) -> List[Dict[str, Any]]:
|
96 |
+
"""Align information across different modalities."""
|
97 |
+
try:
|
98 |
+
# Extract modality types
|
99 |
+
modal_types = list(modalities.keys())
|
100 |
+
|
101 |
+
# Initialize alignment results
|
102 |
+
alignments = []
|
103 |
+
|
104 |
+
# Process each modality pair
|
105 |
+
for i in range(len(modal_types)):
|
106 |
+
for j in range(i + 1, len(modal_types)):
|
107 |
+
type1, type2 = modal_types[i], modal_types[j]
|
108 |
+
|
109 |
+
# Get items from each modality
|
110 |
+
items1 = modalities[type1]
|
111 |
+
items2 = modalities[type2]
|
112 |
+
|
113 |
+
# Find alignments between items
|
114 |
+
for item1 in items1:
|
115 |
+
for item2 in items2:
|
116 |
+
similarity = self._calculate_similarity(item1, item2)
|
117 |
+
if similarity > self.cross_modal_threshold: # Threshold for alignment
|
118 |
+
alignments.append({
|
119 |
+
"type1": type1,
|
120 |
+
"type2": type2,
|
121 |
+
"item1": item1,
|
122 |
+
"item2": item2,
|
123 |
+
"similarity": similarity
|
124 |
+
})
|
125 |
+
|
126 |
+
# Sort alignments by similarity
|
127 |
+
alignments.sort(key=lambda x: x["similarity"], reverse=True)
|
128 |
+
|
129 |
+
return alignments
|
130 |
+
|
131 |
+
except Exception as e:
|
132 |
+
logging.error(f"Error in cross-modal alignment: {str(e)}")
|
133 |
+
return []
|
134 |
+
|
135 |
+
def _calculate_similarity(self, item1: Dict[str, Any], item2: Dict[str, Any]) -> float:
|
136 |
+
"""Calculate similarity between two items from different modalities."""
|
137 |
+
try:
|
138 |
+
# Extract content from items
|
139 |
+
content1 = str(item1.get("content", ""))
|
140 |
+
content2 = str(item2.get("content", ""))
|
141 |
+
|
142 |
+
# Calculate basic similarity (can be enhanced with more sophisticated methods)
|
143 |
+
common_words = set(content1.lower().split()) & set(content2.lower().split())
|
144 |
+
total_words = set(content1.lower().split()) | set(content2.lower().split())
|
145 |
+
|
146 |
+
if not total_words:
|
147 |
+
return 0.0
|
148 |
+
|
149 |
+
return len(common_words) / len(total_words)
|
150 |
+
|
151 |
+
except Exception as e:
|
152 |
+
logging.error(f"Error calculating similarity: {str(e)}")
|
153 |
+
return 0.0
|
154 |
+
|
155 |
+
async def _integrated_analysis(self, alignment: List[Dict[str, Any]], context: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Ask the LLM for cross-modal insights over *alignment*.

    Prompts ``context["groq_api"]`` for ``[I#]``-formatted insight
    sections and returns them parsed via ``_parse_integration``.
    """
    # NOTE(review): json.dumps(context) may raise on non-serializable
    # context values (e.g. the groq_api client) — confirm.
    prompt = f"""
    Perform integrated multi-modal analysis:
    Alignment: {json.dumps(alignment)}
    Context: {json.dumps(context)}

    For each insight:
    1. [Insight]: Key finding
    2. [Sources]: Contributing modalities
    3. [Support]: Supporting evidence
    4. [Confidence]: Confidence level

    Format as:
    [I1]
    Insight: ...
    Sources: ...
    Support: ...
    Confidence: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_integration(response["answer"])
|
177 |
+
|
178 |
+
async def _generate_response(self, integration: List[Dict[str, Any]], context: Dict[str, Any]) -> Dict[str, Any]:
    """Ask the LLM for a unified final answer over the integrated insights.

    The raw answer is parsed by ``_parse_response`` into conclusion,
    per-modality contribution lists, and a confidence value.
    """
    # NOTE(review): json.dumps(context) may raise on non-serializable
    # context values (e.g. the groq_api client) — confirm.
    prompt = f"""
    Generate unified multi-modal response:
    Integration: {json.dumps(integration)}
    Context: {json.dumps(context)}

    Provide:
    1. Main conclusion
    2. Modal contributions
    3. Integration benefits
    4. Confidence level (0-1)
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_response(response["answer"])
|
193 |
+
|
194 |
+
def _parse_modalities(self, response: str) -> Dict[str, List[Dict[str, Any]]]:
|
195 |
+
"""Parse modalities from response."""
|
196 |
+
modalities = {}
|
197 |
+
current_modality = None
|
198 |
+
|
199 |
+
for line in response.split('\n'):
|
200 |
+
line = line.strip()
|
201 |
+
if not line:
|
202 |
+
continue
|
203 |
+
|
204 |
+
if line.startswith('[M'):
|
205 |
+
if current_modality:
|
206 |
+
if current_modality["type"] not in modalities:
|
207 |
+
modalities[current_modality["type"]] = []
|
208 |
+
modalities[current_modality["type"]].append(current_modality)
|
209 |
+
current_modality = {
|
210 |
+
"type": "",
|
211 |
+
"content": "",
|
212 |
+
"features": "",
|
213 |
+
"quality": ""
|
214 |
+
}
|
215 |
+
elif current_modality:
|
216 |
+
if line.startswith('Type:'):
|
217 |
+
current_modality["type"] = line[5:].strip()
|
218 |
+
elif line.startswith('Content:'):
|
219 |
+
current_modality["content"] = line[8:].strip()
|
220 |
+
elif line.startswith('Features:'):
|
221 |
+
current_modality["features"] = line[9:].strip()
|
222 |
+
elif line.startswith('Quality:'):
|
223 |
+
current_modality["quality"] = line[8:].strip()
|
224 |
+
|
225 |
+
if current_modality:
|
226 |
+
if current_modality["type"] not in modalities:
|
227 |
+
modalities[current_modality["type"]] = []
|
228 |
+
modalities[current_modality["type"]].append(current_modality)
|
229 |
+
|
230 |
+
return modalities
|
231 |
+
|
232 |
+
def _parse_integration(self, response: str) -> List[Dict[str, Any]]:
|
233 |
+
"""Parse integration from response."""
|
234 |
+
integration = []
|
235 |
+
current_insight = None
|
236 |
+
|
237 |
+
for line in response.split('\n'):
|
238 |
+
line = line.strip()
|
239 |
+
if not line:
|
240 |
+
continue
|
241 |
+
|
242 |
+
if line.startswith('[I'):
|
243 |
+
if current_insight:
|
244 |
+
integration.append(current_insight)
|
245 |
+
current_insight = {
|
246 |
+
"insight": "",
|
247 |
+
"sources": "",
|
248 |
+
"support": "",
|
249 |
+
"confidence": 0.0
|
250 |
+
}
|
251 |
+
elif current_insight:
|
252 |
+
if line.startswith('Insight:'):
|
253 |
+
current_insight["insight"] = line[8:].strip()
|
254 |
+
elif line.startswith('Sources:'):
|
255 |
+
current_insight["sources"] = line[8:].strip()
|
256 |
+
elif line.startswith('Support:'):
|
257 |
+
current_insight["support"] = line[8:].strip()
|
258 |
+
elif line.startswith('Confidence:'):
|
259 |
+
try:
|
260 |
+
current_insight["confidence"] = float(line[11:].strip())
|
261 |
+
except:
|
262 |
+
pass
|
263 |
+
|
264 |
+
if current_insight:
|
265 |
+
integration.append(current_insight)
|
266 |
+
|
267 |
+
return integration
|
268 |
+
|
269 |
+
def _parse_response(self, response: str) -> Dict[str, Any]:
|
270 |
+
"""Parse response from response."""
|
271 |
+
response_dict = {
|
272 |
+
"conclusion": "",
|
273 |
+
"modal_contributions": [],
|
274 |
+
"integration_benefits": [],
|
275 |
+
"confidence": 0.0
|
276 |
+
}
|
277 |
+
|
278 |
+
mode = None
|
279 |
+
for line in response.split('\n'):
|
280 |
+
line = line.strip()
|
281 |
+
if not line:
|
282 |
+
continue
|
283 |
+
|
284 |
+
if line.startswith('Conclusion:'):
|
285 |
+
response_dict["conclusion"] = line[11:].strip()
|
286 |
+
elif line.startswith('Modal Contributions:'):
|
287 |
+
mode = "modal"
|
288 |
+
elif line.startswith('Integration Benefits:'):
|
289 |
+
mode = "integration"
|
290 |
+
elif line.startswith('Confidence:'):
|
291 |
+
try:
|
292 |
+
response_dict["confidence"] = float(line[11:].strip())
|
293 |
+
except:
|
294 |
+
response_dict["confidence"] = 0.5
|
295 |
+
mode = None
|
296 |
+
elif mode == "modal" and line.startswith('- '):
|
297 |
+
response_dict["modal_contributions"].append(line[2:].strip())
|
298 |
+
elif mode == "integration" and line.startswith('- '):
|
299 |
+
response_dict["integration_benefits"].append(line[2:].strip())
|
300 |
+
|
301 |
+
return response_dict
|
orchestrator.py
ADDED
@@ -0,0 +1,628 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Agentic Orchestrator for Advanced AI System
|
3 |
+
-----------------------------------------
|
4 |
+
Manages and coordinates multiple agentic components:
|
5 |
+
1. Task Planning & Decomposition
|
6 |
+
2. Resource Management
|
7 |
+
3. Agent Communication
|
8 |
+
4. State Management
|
9 |
+
5. Error Recovery
|
10 |
+
6. Performance Monitoring
|
11 |
+
"""
|
12 |
+
|
13 |
+
import logging
|
14 |
+
from typing import Dict, Any, List, Optional, Union, TypeVar, Generic
|
15 |
+
from dataclasses import dataclass, field
|
16 |
+
from enum import Enum
|
17 |
+
import json
|
18 |
+
import asyncio
|
19 |
+
from datetime import datetime
|
20 |
+
import uuid
|
21 |
+
from concurrent.futures import ThreadPoolExecutor
|
22 |
+
import networkx as nx
|
23 |
+
from collections import defaultdict
|
24 |
+
import numpy as np
|
25 |
+
|
26 |
+
from reasoning import UnifiedReasoningEngine as ReasoningEngine, StrategyType as ReasoningMode
|
27 |
+
from reasoning.meta_learning import MetaLearningStrategy
|
28 |
+
|
29 |
+
T = TypeVar('T')
|
30 |
+
|
31 |
+
class AgentRole(Enum):
    """Different roles an agent can take.

    The role is recorded on ``AgentMetadata`` at registration and logged;
    no code in this module branches on a specific role value.
    """
    PLANNER = "planner"
    EXECUTOR = "executor"
    MONITOR = "monitor"
    COORDINATOR = "coordinator"
    LEARNER = "learner"
|
38 |
+
|
39 |
+
class AgentState(Enum):
    """Possible states of an agent.

    IDLE/BUSY drive task assignment; ERROR triggers reassignment in
    stalled-task handling. LEARNING and TERMINATED are not referenced
    elsewhere in this module.
    """
    IDLE = "idle"
    BUSY = "busy"
    ERROR = "error"
    LEARNING = "learning"
    TERMINATED = "terminated"
|
46 |
+
|
47 |
+
class TaskPriority(Enum):
    """Task priority levels; higher numeric value means more urgent.

    Tasks are escalated to CRITICAL when they miss their deadline.
    """
    LOW = 0
    MEDIUM = 1
    HIGH = 2
    CRITICAL = 3
|
53 |
+
|
54 |
+
@dataclass
class AgentMetadata:
    """Metadata about an agent."""
    id: str                    # uuid4 string assigned at registration
    role: AgentRole
    capabilities: List[str]    # matched against task metadata["required_capabilities"]
    state: AgentState
    load: float                # incremented/decremented per assigned task
    last_active: datetime
    metrics: Dict[str, float]  # mean value feeds the suitability score
|
64 |
+
|
65 |
+
@dataclass
class Task:
    """Represents a task in the system."""
    id: str                      # uuid4 string assigned at submission
    description: str
    priority: TaskPriority
    dependencies: List[str]      # ids of tasks that must complete first
    assigned_to: Optional[str]   # agent id, or None while pending
    state: str                   # "pending" | "assigned" | "completed" (strings used in this module)
    created_at: datetime
    deadline: Optional[datetime] # missed deadlines escalate priority to CRITICAL
    metadata: Dict[str, Any]     # e.g. required_capabilities, completion_time
|
77 |
+
|
78 |
+
class AgentOrchestrator:
|
79 |
+
"""Advanced orchestrator for managing agentic system."""
|
80 |
+
|
81 |
+
def __init__(self, config: Dict[str, Any] = None):
    """Set up all orchestrator state and initialize sub-components.

    Args:
        config: Optional settings dict, read via ``.get`` throughout
            (e.g. ``agent_selection_weights`` in agent scoring).
    """
    self.config = config or {}

    # Core components
    self.agents: Dict[str, AgentMetadata] = {}
    self.tasks: Dict[str, Task] = {}
    self.task_graph = nx.DiGraph()  # dependency edges: dep -> dependent

    # State management
    self.state_history: List[Dict[str, Any]] = []  # snapshots from the monitor loop (capped at 1000)
    self.global_state: Dict[str, Any] = {}

    # Resource management
    self.resource_pool: Dict[str, Any] = {}
    self.resource_locks: Dict[str, asyncio.Lock] = {}

    # Communication
    self.message_queue = asyncio.Queue()  # agent-bound notifications (assignments, status requests)
    self.event_bus = asyncio.Queue()      # NOTE(review): never consumed in this file — confirm intended use

    # Performance monitoring
    self.metrics = defaultdict(list)
    self.performance_log = []

    # Error handling
    self.error_handlers: Dict[str, callable] = {}
    self.recovery_strategies: Dict[str, callable] = {}

    # Async support
    self.executor = ThreadPoolExecutor(max_workers=4)
    self.lock = asyncio.Lock()  # guards agents/tasks mutations

    # Logging
    self.logger = logging.getLogger(__name__)

    # Initialize components
    self._init_components()
|
118 |
+
|
119 |
+
def _init_components(self):
    """Initialize orchestrator components.

    Builds the unified reasoning engine and meta-learning strategy, then
    registers the default error handlers / recovery strategies.
    """
    # Initialize reasoning engine.
    # NOTE(review): thresholds and strategy weights are hard-coded here
    # rather than taken from self.config — confirm that is intended.
    self.reasoning_engine = ReasoningEngine(
        min_confidence=0.7,
        parallel_threshold=5,
        learning_rate=0.1,
        strategy_weights={
            "LOCAL_LLM": 2.0,
            "CHAIN_OF_THOUGHT": 1.0,
            "TREE_OF_THOUGHTS": 1.0,
            "META_LEARNING": 1.5
        }
    )

    # Initialize meta-learning
    self.meta_learning = MetaLearningStrategy()

    # Register basic error handlers
    self._register_error_handlers()
|
139 |
+
|
140 |
+
async def register_agent(
    self,
    role: AgentRole,
    capabilities: List[str]
) -> str:
    """Register a new agent and return its generated id.

    The agent starts idle with zero load and an empty metrics map; the
    registry mutation happens under the global lock.
    """
    new_id = str(uuid.uuid4())

    record = AgentMetadata(
        id=new_id,
        role=role,
        capabilities=capabilities,
        state=AgentState.IDLE,
        load=0.0,
        last_active=datetime.now(),
        metrics={},
    )

    async with self.lock:
        self.agents[new_id] = record
        self.logger.info(f"Registered new agent: {new_id} with role {role}")

    return new_id
|
163 |
+
|
164 |
+
async def submit_task(
    self,
    description: str,
    priority: TaskPriority = TaskPriority.MEDIUM,
    dependencies: List[str] = None,
    deadline: Optional[datetime] = None,
    metadata: Dict[str, Any] = None
) -> str:
    """Create a pending task, record it, and attempt to plan it.

    Returns the generated task id. The task is entered into the
    dependency graph under the global lock before planning runs.
    """
    new_id = str(uuid.uuid4())

    record = Task(
        id=new_id,
        description=description,
        priority=priority,
        dependencies=dependencies or [],
        assigned_to=None,
        state="pending",
        created_at=datetime.now(),
        deadline=deadline,
        metadata=metadata or {},
    )

    async with self.lock:
        self.tasks[new_id] = record
        self._update_task_graph(record)

    # Trigger task planning
    await self._plan_task_execution(new_id)

    return new_id
|
195 |
+
|
196 |
+
async def _plan_task_execution(self, task_id: str) -> None:
|
197 |
+
"""Plan the execution of a task."""
|
198 |
+
task = self.tasks[task_id]
|
199 |
+
|
200 |
+
# Check dependencies
|
201 |
+
if not await self._check_dependencies(task):
|
202 |
+
self.logger.info(f"Task {task_id} waiting for dependencies")
|
203 |
+
return
|
204 |
+
|
205 |
+
# Find suitable agent
|
206 |
+
agent_id = await self._find_suitable_agent(task)
|
207 |
+
if not agent_id:
|
208 |
+
self.logger.warning(f"No suitable agent found for task {task_id}")
|
209 |
+
return
|
210 |
+
|
211 |
+
# Assign task
|
212 |
+
await self._assign_task(task_id, agent_id)
|
213 |
+
|
214 |
+
async def _check_dependencies(self, task: Task) -> bool:
    """Return True iff every dependency exists and has completed."""
    return all(
        dep_id in self.tasks and self.tasks[dep_id].state == "completed"
        for dep_id in task.dependencies
    )
|
222 |
+
|
223 |
+
async def _find_suitable_agent(self, task: Task) -> Optional[str]:
    """Score every idle agent for *task* and return the best id, or None."""
    best_id: Optional[str] = None
    best_score = float('-inf')

    for candidate_id, candidate in self.agents.items():
        # Only idle agents are eligible.
        if candidate.state != AgentState.IDLE:
            continue

        suitability = await self._calculate_agent_suitability(candidate, task)
        if suitability > best_score:
            best_id, best_score = candidate_id, suitability

    return best_id
|
238 |
+
|
239 |
+
async def _calculate_agent_suitability(
    self,
    agent: AgentMetadata,
    task: Task
) -> float:
    """Score how suitable *agent* is for *task* (higher is better).

    Combines a capability-match count, inverse load, and the mean of the
    agent's metric values, weighted by ``config["agent_selection_weights"]``
    (defaults: capabilities 0.5, load 0.3, performance 0.2).
    """
    # Base score on capabilities match (count of satisfied requirements;
    # note this is a raw count, not normalized).
    capability_score = sum(
        1 for cap in task.metadata.get("required_capabilities", [])
        if cap in agent.capabilities
    )

    # Consider agent load
    load_score = 1 - agent.load

    # Consider agent's recent performance (0.5 neutral default with no history)
    performance_score = sum(agent.metrics.values()) / len(agent.metrics) if agent.metrics else 0.5

    # Weighted combination. Merge user config OVER defaults so a partially
    # specified weights dict no longer raises KeyError (previous behavior
    # replaced the defaults wholesale).
    weights = {
        "capabilities": 0.5,
        "load": 0.3,
        "performance": 0.2,
        **self.config.get("agent_selection_weights", {}),
    }

    return (
        weights["capabilities"] * capability_score +
        weights["load"] * load_score +
        weights["performance"] * performance_score
    )
|
269 |
+
|
270 |
+
async def _assign_task(self, task_id: str, agent_id: str) -> None:
    """Bind a task to an agent under the global lock and notify the agent."""
    async with self.lock:
        task_record = self.tasks[task_id]
        agent_record = self.agents[agent_id]

        # Mark both sides of the assignment.
        task_record.assigned_to = agent_id
        task_record.state = "assigned"
        agent_record.state = AgentState.BUSY
        agent_record.load += 1
        agent_record.last_active = datetime.now()

        self.logger.info(f"Assigned task {task_id} to agent {agent_id}")

        # Notify agent
        await self.message_queue.put({
            "type": "task_assignment",
            "task_id": task_id,
            "agent_id": agent_id,
            "timestamp": datetime.now()
        })
|
291 |
+
|
292 |
+
def _update_task_graph(self, task: Task) -> None:
    """Insert *task* and its dependency edges into the task graph."""
    graph = self.task_graph
    graph.add_node(task.id, task=task)
    # Edge direction: prerequisite -> dependent.
    for prerequisite in task.dependencies:
        graph.add_edge(prerequisite, task.id)
|
297 |
+
|
298 |
+
async def _monitor_system_state(self):
    """Background loop: snapshot system state roughly once per second.

    Each iteration summarizes agents and tasks into ``self.global_state``,
    archives a copy in ``self.state_history`` (capped at 1000 entries),
    and runs anomaly checks. Runs forever; failures are routed to the
    "monitoring_error" handler and the loop continues.
    """
    while True:
        try:
            # Collect agent states
            agent_states = {
                agent_id: {
                    "state": agent.state,
                    "load": agent.load,
                    "metrics": agent.metrics
                }
                for agent_id, agent in self.agents.items()
            }

            # Collect task states
            task_states = {
                task_id: {
                    "state": task.state,
                    "assigned_to": task.assigned_to,
                    "deadline": task.deadline
                }
                for task_id, task in self.tasks.items()
            }

            # Update global state
            self.global_state = {
                "timestamp": datetime.now(),
                "agents": agent_states,
                "tasks": task_states,
                "resource_usage": self._get_resource_usage(),
                "performance_metrics": self._calculate_performance_metrics()
            }

            # Archive state (shallow copy: nested dicts remain shared)
            self.state_history.append(self.global_state.copy())

            # Trim history if too long
            if len(self.state_history) > 1000:
                self.state_history = self.state_history[-1000:]

            # Check for anomalies
            await self._check_anomalies()

            await asyncio.sleep(1)  # Monitor frequency

        except Exception as e:
            self.logger.error(f"Error in system monitoring: {e}")
            await self._handle_error("monitoring_error", e)
|
346 |
+
|
347 |
+
def _get_resource_usage(self) -> Dict[str, float]:
|
348 |
+
"""Get current resource usage statistics."""
|
349 |
+
return {
|
350 |
+
"cpu_usage": sum(agent.load for agent in self.agents.values()) / len(self.agents),
|
351 |
+
"memory_usage": len(self.state_history) * 1000, # Rough estimate
|
352 |
+
"queue_size": self.message_queue.qsize()
|
353 |
+
}
|
354 |
+
|
355 |
+
def _calculate_performance_metrics(self) -> Dict[str, float]:
|
356 |
+
"""Calculate current performance metrics."""
|
357 |
+
metrics = {}
|
358 |
+
|
359 |
+
# Task completion rate
|
360 |
+
completed_tasks = sum(1 for task in self.tasks.values() if task.state == "completed")
|
361 |
+
total_tasks = len(self.tasks)
|
362 |
+
metrics["task_completion_rate"] = completed_tasks / max(1, total_tasks)
|
363 |
+
|
364 |
+
# Average task duration
|
365 |
+
durations = []
|
366 |
+
for task in self.tasks.values():
|
367 |
+
if task.state == "completed" and "completion_time" in task.metadata:
|
368 |
+
duration = (task.metadata["completion_time"] - task.created_at).total_seconds()
|
369 |
+
durations.append(duration)
|
370 |
+
metrics["avg_task_duration"] = sum(durations) / len(durations) if durations else 0
|
371 |
+
|
372 |
+
# Agent utilization
|
373 |
+
metrics["agent_utilization"] = sum(agent.load for agent in self.agents.values()) / len(self.agents)
|
374 |
+
|
375 |
+
return metrics
|
376 |
+
|
377 |
+
async def _check_anomalies(self):
|
378 |
+
"""Check for system anomalies."""
|
379 |
+
# Check for overloaded agents
|
380 |
+
for agent_id, agent in self.agents.items():
|
381 |
+
if agent.load > 0.9: # 90% load threshold
|
382 |
+
await self._handle_overload(agent_id)
|
383 |
+
|
384 |
+
# Check for stalled tasks
|
385 |
+
now = datetime.now()
|
386 |
+
for task_id, task in self.tasks.items():
|
387 |
+
if task.state == "assigned":
|
388 |
+
duration = (now - task.created_at).total_seconds()
|
389 |
+
if duration > 3600: # 1 hour threshold
|
390 |
+
await self._handle_stalled_task(task_id)
|
391 |
+
|
392 |
+
# Check for missed deadlines
|
393 |
+
for task_id, task in self.tasks.items():
|
394 |
+
if task.deadline and now > task.deadline and task.state != "completed":
|
395 |
+
await self._handle_missed_deadline(task_id)
|
396 |
+
|
397 |
+
async def _handle_overload(self, agent_id: str):
|
398 |
+
"""Handle an overloaded agent."""
|
399 |
+
agent = self.agents[agent_id]
|
400 |
+
|
401 |
+
# Try to redistribute tasks
|
402 |
+
assigned_tasks = [
|
403 |
+
task_id for task_id, task in self.tasks.items()
|
404 |
+
if task.assigned_to == agent_id and task.state == "assigned"
|
405 |
+
]
|
406 |
+
|
407 |
+
for task_id in assigned_tasks:
|
408 |
+
# Find another suitable agent
|
409 |
+
new_agent_id = await self._find_suitable_agent(self.tasks[task_id])
|
410 |
+
if new_agent_id:
|
411 |
+
await self._reassign_task(task_id, new_agent_id)
|
412 |
+
|
413 |
+
async def _handle_stalled_task(self, task_id: str):
    """Probe or reassign a task that has been assigned for too long."""
    stalled = self.tasks[task_id]

    # Unassigned tasks are not actionable here.
    if not stalled.assigned_to:
        return

    worker = self.agents[stalled.assigned_to]
    if worker.state == AgentState.ERROR:
        # Agent is in error state, reassign task
        await self._reassign_task(task_id, None)
    else:
        # Otherwise ask the agent for a status update.
        await self.message_queue.put({
            "type": "status_request",
            "task_id": task_id,
            "agent_id": stalled.assigned_to,
            "timestamp": datetime.now()
        })
|
431 |
+
|
432 |
+
async def _handle_missed_deadline(self, task_id: str):
    """Escalate a task whose deadline has passed without completion."""
    late = self.tasks[task_id]

    # Log the incident
    self.logger.warning(f"Task {task_id} missed deadline: {late.deadline}")

    # Escalate to the highest priority level.
    late.priority = TaskPriority.CRITICAL

    if late.assigned_to:
        # Ask the current agent to expedite.
        await self.message_queue.put({
            "type": "expedite_request",
            "task_id": task_id,
            "agent_id": late.assigned_to,
            "timestamp": datetime.now()
        })
    else:
        # Unassigned: run planning again to place it with the next agent.
        await self._plan_task_execution(task_id)
|
453 |
+
|
454 |
+
async def _reassign_task(self, task_id: str, new_agent_id: Optional[str] = None):
    """Move a task off its current agent; search for a new one if none given."""
    task = self.tasks[task_id]
    previous = task.assigned_to

    if previous:
        # Release the old agent's capacity.
        prior_agent = self.agents[previous]
        prior_agent.load -= 1
        if prior_agent.load <= 0:
            prior_agent.state = AgentState.IDLE

    if new_agent_id is None:
        # No explicit target supplied: look one up.
        new_agent_id = await self._find_suitable_agent(task)

    if new_agent_id:
        await self._assign_task(task_id, new_agent_id)
    else:
        # Nobody can take it right now; park it as pending.
        task.state = "pending"
        task.assigned_to = None
|
477 |
+
|
478 |
+
def _register_error_handlers(self):
|
479 |
+
"""Register basic error handlers."""
|
480 |
+
self.error_handlers.update({
|
481 |
+
"monitoring_error": self._handle_monitoring_error,
|
482 |
+
"agent_error": self._handle_agent_error,
|
483 |
+
"task_error": self._handle_task_error,
|
484 |
+
"resource_error": self._handle_resource_error
|
485 |
+
})
|
486 |
+
|
487 |
+
self.recovery_strategies.update({
|
488 |
+
"agent_recovery": self._recover_agent,
|
489 |
+
"task_recovery": self._recover_task,
|
490 |
+
"resource_recovery": self._recover_resource
|
491 |
+
})
|
492 |
+
|
493 |
+
async def _handle_error(self, error_type: str, error: Exception):
|
494 |
+
"""Handle an error using registered handlers."""
|
495 |
+
handler = self.error_handlers.get(error_type)
|
496 |
+
if handler:
|
497 |
+
try:
|
498 |
+
await handler(error)
|
499 |
+
except Exception as e:
|
500 |
+
self.logger.error(f"Error in error handler: {e}")
|
501 |
+
else:
|
502 |
+
self.logger.error(f"No handler for error type: {error_type}")
|
503 |
+
self.logger.error(f"Error: {error}")
|
504 |
+
|
505 |
+
async def _handle_monitoring_error(self, error: Exception):
|
506 |
+
"""Handle monitoring system errors."""
|
507 |
+
self.logger.error(f"Monitoring error: {error}")
|
508 |
+
# Implement recovery logic
|
509 |
+
pass
|
510 |
+
|
511 |
+
async def _handle_agent_error(self, error: Exception):
|
512 |
+
"""Handle agent-related errors."""
|
513 |
+
self.logger.error(f"Agent error: {error}")
|
514 |
+
# Implement recovery logic
|
515 |
+
pass
|
516 |
+
|
517 |
+
async def _handle_task_error(self, error: Exception):
|
518 |
+
"""Handle task-related errors."""
|
519 |
+
self.logger.error(f"Task error: {error}")
|
520 |
+
# Implement recovery logic
|
521 |
+
pass
|
522 |
+
|
523 |
+
async def _handle_resource_error(self, error: Exception):
|
524 |
+
"""Handle resource-related errors."""
|
525 |
+
self.logger.error(f"Resource error: {error}")
|
526 |
+
# Implement recovery logic
|
527 |
+
pass
|
528 |
+
|
529 |
+
async def _recover_agent(self, agent_id: str):
    """Reset a failed agent and push its tasks elsewhere; True on success."""
    try:
        agent = self.agents[agent_id]

        # Log recovery attempt
        self.logger.info(f"Attempting to recover agent {agent_id}")

        # Put the agent back into a clean idle state.
        agent.state = AgentState.IDLE
        agent.load = 0
        agent.last_active = datetime.now()

        # Anything this agent was working on goes back into the pool.
        for task_id, task in self.tasks.items():
            if task.assigned_to == agent_id:
                await self._reassign_task(task_id)

        # Track how often this agent has needed recovery.
        agent.metrics["recovery_attempts"] = agent.metrics.get("recovery_attempts", 0) + 1

        self.logger.info(f"Successfully recovered agent {agent_id}")
        return True

    except Exception as e:
        self.logger.error(f"Failed to recover agent {agent_id}: {e}")
        return False
|
556 |
+
|
557 |
+
async def _recover_task(self, task_id: str):
|
558 |
+
"""Recover a failed task."""
|
559 |
+
try:
|
560 |
+
task = self.tasks[task_id]
|
561 |
+
|
562 |
+
# Log recovery attempt
|
563 |
+
self.logger.info(f"Attempting to recover task {task_id}")
|
564 |
+
|
565 |
+
# Reset task state
|
566 |
+
task.state = "pending"
|
567 |
+
task.assigned_to = None
|
568 |
+
|
569 |
+
# Try to reassign the task
|
570 |
+
await self._reassign_task(task_id)
|
571 |
+
|
572 |
+
self.logger.info(f"Successfully recovered task {task_id}")
|
573 |
+
return True
|
574 |
+
|
575 |
+
except Exception as e:
|
576 |
+
self.logger.error(f"Failed to recover task {task_id}: {e}")
|
577 |
+
return False
|
578 |
+
|
579 |
+
async def _recover_resource(self, resource_id: str):
|
580 |
+
"""Recover a failed resource."""
|
581 |
+
try:
|
582 |
+
# Log recovery attempt
|
583 |
+
self.logger.info(f"Attempting to recover resource {resource_id}")
|
584 |
+
|
585 |
+
# Release any locks on the resource
|
586 |
+
if resource_id in self.resource_locks:
|
587 |
+
lock = self.resource_locks[resource_id]
|
588 |
+
if lock.locked():
|
589 |
+
lock.release()
|
590 |
+
|
591 |
+
# Reset resource state
|
592 |
+
if resource_id in self.resource_pool:
|
593 |
+
self.resource_pool[resource_id] = {
|
594 |
+
"state": "available",
|
595 |
+
"last_error": None,
|
596 |
+
"last_recovery": datetime.now()
|
597 |
+
}
|
598 |
+
|
599 |
+
self.logger.info(f"Successfully recovered resource {resource_id}")
|
600 |
+
return True
|
601 |
+
|
602 |
+
except Exception as e:
|
603 |
+
self.logger.error(f"Failed to recover resource {resource_id}: {e}")
|
604 |
+
return False
|
605 |
+
|
606 |
+
async def create_agent(self, role: AgentRole, capabilities: List[str]) -> str:
    """Create a new agent with specified role and capabilities.

    Like ``register_agent``, but seeds the agent with baseline
    performance metrics. Returns the new agent id.
    """
    agent_id = str(uuid.uuid4())

    agent_metadata = AgentMetadata(
        id=agent_id,
        role=role,
        capabilities=capabilities,
        state=AgentState.IDLE,
        load=0.0,
        last_active=datetime.now(),
        metrics={
            "tasks_completed": 0,
            "success_rate": 1.0,
            "avg_response_time": 0.0,
            "resource_usage": 0.0
        }
    )

    # Mutate the registry under the global lock, consistent with
    # register_agent (the original wrote self.agents unsynchronized).
    async with self.lock:
        self.agents[agent_id] = agent_metadata
        self.logger.info(f"Created new agent {agent_id} with role {role}")

    return agent_id
|
reasoning.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
reasoning/__init__.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Advanced Reasoning Engine for Multi-Model System
|
3 |
+
---------------------------------------------
|
4 |
+
A highly sophisticated reasoning system combining multiple reasoning strategies.
|
5 |
+
|
6 |
+
Core Reasoning:
|
7 |
+
1. Chain of Thought (CoT)
|
8 |
+
2. Tree of Thoughts (ToT)
|
9 |
+
3. Recursive Reasoning
|
10 |
+
4. Analogical Reasoning
|
11 |
+
5. Meta-Learning
|
12 |
+
6. Local LLM
|
13 |
+
|
14 |
+
Advanced Reasoning:
|
15 |
+
7. Neurosymbolic Reasoning
|
16 |
+
8. Bayesian Reasoning
|
17 |
+
9. Quantum Reasoning
|
18 |
+
10. Emergent Reasoning
|
19 |
+
11. Multimodal Reasoning
|
20 |
+
12. Specialized Reasoning
|
21 |
+
|
22 |
+
Learning & Adaptation:
|
23 |
+
13. Market Analysis
|
24 |
+
14. Portfolio Optimization
|
25 |
+
15. Venture Strategies
|
26 |
+
16. Monetization Strategies
|
27 |
+
"""
|
28 |
+
|
29 |
+
from .base import ReasoningStrategy
|
30 |
+
from .multimodal import MultiModalReasoning
|
31 |
+
from .bayesian import BayesianReasoning
|
32 |
+
from .quantum import QuantumReasoning
|
33 |
+
from .neurosymbolic import NeurosymbolicReasoning
|
34 |
+
from .emergent import EmergentReasoning
|
35 |
+
from .meta_learning import MetaLearningStrategy
|
36 |
+
from .chain_of_thought import ChainOfThoughtStrategy
|
37 |
+
from .tree_of_thoughts import TreeOfThoughtsStrategy
|
38 |
+
from .recursive import RecursiveReasoning
|
39 |
+
from .analogical import AnalogicalReasoning
|
40 |
+
from .specialized import SpecializedReasoning
|
41 |
+
from .local_llm import LocalLLMStrategy
|
42 |
+
from .market_analysis import MarketAnalysisStrategy
|
43 |
+
from .portfolio_optimization import PortfolioOptimizationStrategy
|
44 |
+
from .venture_strategies import VentureStrategy
|
45 |
+
from .monetization import MonetizationStrategy
|
46 |
+
from .unified_engine import UnifiedReasoningEngine, StrategyType, StrategyResult, UnifiedResult
|
47 |
+
|
48 |
+
__all__ = [
|
49 |
+
'ReasoningStrategy',
|
50 |
+
'MultiModalReasoning',
|
51 |
+
'BayesianReasoning',
|
52 |
+
'QuantumReasoning',
|
53 |
+
'NeurosymbolicReasoning',
|
54 |
+
'EmergentReasoning',
|
55 |
+
'MetaLearningStrategy',
|
56 |
+
'ChainOfThoughtStrategy',
|
57 |
+
'TreeOfThoughtsStrategy',
|
58 |
+
'RecursiveReasoning',
|
59 |
+
'AnalogicalReasoning',
|
60 |
+
'SpecializedReasoning',
|
61 |
+
'LocalLLMStrategy',
|
62 |
+
'MarketAnalysisStrategy',
|
63 |
+
'PortfolioOptimizationStrategy',
|
64 |
+
'VentureStrategy',
|
65 |
+
'MonetizationStrategy',
|
66 |
+
'UnifiedReasoningEngine',
|
67 |
+
'StrategyType',
|
68 |
+
'StrategyResult',
|
69 |
+
'UnifiedResult'
|
70 |
+
]
|
reasoning/agentic.py
ADDED
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Specialized reasoning strategies for Agentic Workflow."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import asyncio
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
class TaskType(Enum):
    """Types of tasks in agentic workflow.

    Each member's string value is the canonical identifier for the task
    kind, so ``TaskType(value)`` round-trips from a stored string.
    """
    CODE_GENERATION = "code_generation"
    CODE_MODIFICATION = "code_modification"
    CODE_REVIEW = "code_review"
    DEBUGGING = "debugging"
    ARCHITECTURE = "architecture"
    OPTIMIZATION = "optimization"
    DOCUMENTATION = "documentation"
    TESTING = "testing"
|
24 |
+
|
25 |
+
class ResourceType(Enum):
    """Types of resources in agentic workflow.

    Each member's string value is the canonical identifier for the
    resource kind, so ``ResourceType(value)`` round-trips.
    """
    CODE_CONTEXT = "code_context"
    SYSTEM_CONTEXT = "system_context"
    USER_CONTEXT = "user_context"
    TOOLS = "tools"
    APIS = "apis"
    DOCUMENTATION = "documentation"
    DEPENDENCIES = "dependencies"
    HISTORY = "history"
|
35 |
+
|
36 |
+
@dataclass
class TaskComponent:
    """Component of a decomposed task."""
    id: str                              # unique component identifier
    type: TaskType                       # kind of work this component performs
    description: str                     # human-readable summary of the component
    dependencies: List[str]              # ids of components that must finish first
    resources: Dict[ResourceType, Any]   # required resources, keyed by resource type
    constraints: List[str]               # free-form constraint descriptions
    priority: float                      # scheduling priority (higher = sooner, presumably -- confirm with scheduler)
    metadata: Dict[str, Any] = field(default_factory=dict)  # optional extra info
|
47 |
+
|
48 |
+
@dataclass
class ResourceAllocation:
    """Resource allocation for a task."""
    resource_type: ResourceType   # which kind of resource is being allocated
    quantity: Union[int, float]   # amount allocated (units depend on the resource type)
    priority: float               # allocation priority relative to competing requests
    constraints: List[str]        # constraints limiting how the resource may be used
    metadata: Dict[str, Any] = field(default_factory=dict)  # optional extra info
|
56 |
+
|
57 |
+
@dataclass
class ExecutionStep:
    """Step in task execution."""
    id: str                             # unique step identifier
    task_id: str                        # id of the task this step belongs to
    action: str                         # action performed in this step
    resources: Dict[ResourceType, Any]  # resources used by the step
    status: str                         # current status (free-form string)
    result: Optional[Dict[str, Any]]    # outcome payload; None until available
    feedback: List[str]                 # feedback messages gathered for the step
    timestamp: datetime = field(default_factory=datetime.now)  # creation time of the step record
|
68 |
+
|
69 |
+
class TaskDecompositionStrategy(ReasoningStrategy):
    """
    Advanced task decomposition strategy that:
    1. Analyzes task complexity and dependencies
    2. Breaks down tasks into manageable components
    3. Identifies resource requirements
    4. Establishes execution order
    5. Manages constraints and priorities

    NOTE(review): the helpers used by reason() (_analyze_task,
    _generate_components, _establish_dependencies, _determine_execution_order,
    _component_to_dict) are not defined on this class in this file -- confirm
    they are provided by ReasoningStrategy or a subclass; otherwise reason()
    always falls into the error branch.
    """

    def __init__(self, max_components: int = 10):
        # Upper bound on how many components a task may be split into.
        self.max_components = max_components
        # Components produced so far, keyed by component id.
        self.components: Dict[str, TaskComponent] = {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Decompose task into components.

        Returns on success: {"success": True, "components", "dependency_graph",
        "execution_order", "metadata"}; on any exception:
        {"success": False, "error": <message>}.
        """
        try:
            # Analyze task
            task_analysis = await self._analyze_task(query, context)

            # Generate components
            components = await self._generate_components(task_analysis, context)

            # Establish dependencies
            dependency_graph = await self._establish_dependencies(components, context)

            # Determine execution order
            execution_order = await self._determine_execution_order(
                components, dependency_graph, context)

            return {
                "success": True,
                "components": [self._component_to_dict(c) for c in components],
                "dependency_graph": dependency_graph,
                "execution_order": execution_order,
                "metadata": {
                    "total_components": len(components),
                    "complexity_score": task_analysis.get("complexity_score", 0.0),
                    "resource_requirements": task_analysis.get("resource_requirements", {})
                }
            }
        except Exception as e:
            logging.error(f"Error in task decomposition: {str(e)}")
            return {"success": False, "error": str(e)}
|
113 |
+
|
114 |
+
class ResourceManagementStrategy(ReasoningStrategy):
    """
    Advanced resource management strategy that:
    1. Tracks available resources
    2. Allocates resources to tasks
    3. Handles resource constraints
    4. Optimizes resource utilization
    5. Manages resource dependencies

    NOTE(review): _analyze_requirements, _check_availability,
    _generate_allocation_plan, _optimize_allocations, _calculate_utilization,
    _calculate_efficiency and _check_constraints are not defined on this
    class in this file -- confirm they exist elsewhere; otherwise reason()
    always returns the error dict.
    """

    def __init__(self):
        # Current allocations keyed by an allocation identifier.
        self.allocations: Dict[str, ResourceAllocation] = {}
        # Historical utilization snapshots used for efficiency metrics.
        self.utilization_history: List[Dict[str, Any]] = []

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Manage resource allocation.

        On success returns {"success": True, "allocation_plan",
        "resource_metrics"}; on any exception returns
        {"success": False, "error": <message>}.
        """
        try:
            # Analyze resource requirements
            requirements = await self._analyze_requirements(query, context)

            # Check resource availability
            availability = await self._check_availability(requirements, context)

            # Generate allocation plan
            allocation_plan = await self._generate_allocation_plan(
                requirements, availability, context)

            # Optimize allocations
            optimized_plan = await self._optimize_allocations(allocation_plan, context)

            return {
                "success": True,
                "allocation_plan": optimized_plan,
                "resource_metrics": {
                    "utilization": self._calculate_utilization(),
                    "efficiency": self._calculate_efficiency(),
                    "constraints_satisfied": self._check_constraints(optimized_plan)
                }
            }
        except Exception as e:
            logging.error(f"Error in resource management: {str(e)}")
            return {"success": False, "error": str(e)}
|
156 |
+
|
157 |
+
class ContextualPlanningStrategy(ReasoningStrategy):
    """
    Advanced contextual planning strategy that:
    1. Analyzes multiple context types
    2. Generates context-aware plans
    3. Handles context changes
    4. Maintains context consistency
    5. Optimizes for context constraints

    NOTE(review): _analyze_contexts, _generate_base_plan, _adapt_to_contexts
    and _validate_plan are not defined on this class in this file -- confirm
    they exist elsewhere; otherwise reason() always returns the error dict.
    """

    def __init__(self):
        # Past context snapshots, newest last.
        self.context_history: List[Dict[str, Any]] = []
        # Record of plan adaptations made in response to context changes.
        self.plan_adaptations: List[Dict[str, Any]] = []

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate context-aware plan.

        On success returns the adapted plan, the context-impact assessment,
        the adaptation log and validation results; on any exception returns
        {"success": False, "error": <message>}.
        """
        try:
            # Analyze contexts
            context_analysis = await self._analyze_contexts(query, context)

            # Generate base plan
            base_plan = await self._generate_base_plan(context_analysis, context)

            # Adapt to contexts
            adapted_plan = await self._adapt_to_contexts(base_plan, context_analysis)

            # Validate plan
            validation = await self._validate_plan(adapted_plan, context)

            return {
                "success": True,
                "plan": adapted_plan,
                "context_impact": context_analysis.get("impact_assessment", {}),
                "adaptations": self.plan_adaptations,
                "validation_results": validation
            }
        except Exception as e:
            logging.error(f"Error in contextual planning: {str(e)}")
            return {"success": False, "error": str(e)}
|
196 |
+
|
197 |
+
class AdaptiveExecutionStrategy(ReasoningStrategy):
    """
    Advanced adaptive execution strategy that:
    1. Monitors execution progress
    2. Adapts to changes and feedback
    3. Handles errors and exceptions
    4. Optimizes execution flow
    5. Maintains execution state

    NOTE(review): _initialize_execution, _is_execution_complete,
    _execute_step, _process_feedback, _adapt_execution, _record_step and
    _step_to_dict are not defined on this class in this file -- confirm
    they exist elsewhere. Also note the monitoring loop below has no
    iteration cap: a state that never reports completion loops forever.
    """

    def __init__(self):
        # Steps executed so far, in order.
        self.execution_steps: List[ExecutionStep] = []
        # Record of adaptations applied during execution.
        self.adaptation_history: List[Dict[str, Any]] = []

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Execute task adaptively.

        Repeatedly executes a step, processes feedback and adapts the
        execution state until completion; on any exception returns
        {"success": False, "error": <message>}.
        """
        try:
            # Initialize execution
            execution_state = await self._initialize_execution(query, context)

            # Monitor and adapt
            while not self._is_execution_complete(execution_state):
                # Execute step
                step_result = await self._execute_step(execution_state, context)

                # Process feedback
                feedback = await self._process_feedback(step_result, context)

                # Adapt execution
                execution_state = await self._adapt_execution(
                    execution_state, feedback, context)

                # Record step
                self._record_step(step_result, feedback)

            return {
                "success": True,
                "execution_trace": [self._step_to_dict(s) for s in self.execution_steps],
                "adaptations": self.adaptation_history,
                "final_state": execution_state
            }
        except Exception as e:
            logging.error(f"Error in adaptive execution: {str(e)}")
            return {"success": False, "error": str(e)}
|
241 |
+
|
242 |
+
class FeedbackIntegrationStrategy(ReasoningStrategy):
    """
    Advanced feedback integration strategy that:
    1. Collects multiple types of feedback
    2. Analyzes feedback patterns
    3. Generates improvement suggestions
    4. Tracks feedback implementation
    5. Measures feedback impact

    NOTE(review): _analyze_patterns, _generate_improvements,
    _implement_improvements and _measure_impact are not defined on this
    class in this file -- confirm they are provided elsewhere; otherwise
    reason() always returns the error dict.
    """

    def __init__(self):
        # Raw feedback items collected so far.
        self.feedback_history: List[Dict[str, Any]] = []
        # Improvements derived (and possibly applied) from that feedback.
        self.improvement_history: List[Dict[str, Any]] = []

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Integrate and apply feedback.

        Runs collect -> analyze -> improve -> implement -> measure and
        returns the artifacts of each stage; on any exception returns
        {"success": False, "error": <message>}.
        """
        try:
            # Collect feedback
            feedback = await self._collect_feedback(query, context)

            # Analyze patterns
            patterns = await self._analyze_patterns(feedback, context)

            # Generate improvements
            improvements = await self._generate_improvements(patterns, context)

            # Implement changes
            implementation = await self._implement_improvements(improvements, context)

            # Measure impact
            impact = await self._measure_impact(implementation, context)

            return {
                "success": True,
                "feedback_analysis": patterns,
                "improvements": improvements,
                "implementation_status": implementation,
                "impact_metrics": impact
            }
        except Exception as e:
            logging.error(f"Error in feedback integration: {str(e)}")
            return {"success": False, "error": str(e)}

    async def _collect_feedback(self, query: str, context: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Collect feedback from multiple sources.

        Builds a prompt from the query and context, asks the LLM behind
        context["groq_api"], and parses the reply with _parse_feedback.
        """
        prompt = f"""
        Collect feedback from:
        Query: {query}
        Context: {json.dumps(context)}

        Consider:
        1. User feedback
        2. System metrics
        3. Code analysis
        4. Performance data
        5. Error patterns

        Format as:
        [Feedback]
        Source: ...
        Type: ...
        Content: ...
        Priority: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_feedback(response["answer"])

    def _parse_feedback(self, response: str) -> List[Dict[str, Any]]:
        """Parse feedback items from an LLM response.

        Expects blocks introduced by '[Feedback]' followed by
        'Source:/Type:/Content:/Priority:' lines. A malformed priority keeps
        the 0.0 default instead of aborting the whole parse.
        """
        feedback_items = []
        current = None

        for line in response.split('\n'):
            line = line.strip()
            if not line:
                continue

            if line.startswith('[Feedback]'):
                if current:
                    feedback_items.append(current)
                current = {
                    "source": "",
                    "type": "",
                    "content": "",
                    "priority": 0.0
                }
            elif current:
                if line.startswith('Source:'):
                    current["source"] = line[7:].strip()
                elif line.startswith('Type:'):
                    current["type"] = line[5:].strip()
                elif line.startswith('Content:'):
                    current["content"] = line[8:].strip()
                elif line.startswith('Priority:'):
                    try:
                        current["priority"] = float(line[9:].strip())
                    except ValueError:
                        # Was a bare `except:` -- narrowed so real bugs
                        # (KeyboardInterrupt, NameError, ...) are not hidden.
                        pass

        if current:
            feedback_items.append(current)

        return feedback_items
|
reasoning/analogical.py
ADDED
@@ -0,0 +1,611 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Analogical reasoning implementation with advanced pattern matching and transfer learning."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Tuple, Callable
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
class AnalogicalLevel(Enum):
    """Levels of analogical similarity.

    Listed roughly from shallow (surface) to deep (abstract) similarity;
    each member's string value round-trips via ``AnalogicalLevel(value)``.
    """
    SURFACE = "surface"
    STRUCTURAL = "structural"
    SEMANTIC = "semantic"
    FUNCTIONAL = "functional"
    CAUSAL = "causal"
    ABSTRACT = "abstract"
|
22 |
+
|
23 |
+
class MappingType(Enum):
    """Types of analogical mappings.

    Each member's string value is the canonical identifier used in the
    mapping prompts and round-trips via ``MappingType(value)``.
    """
    DIRECT = "direct"
    TRANSFORMED = "transformed"
    COMPOSITE = "composite"
    ABSTRACT = "abstract"
    METAPHORICAL = "metaphorical"
    HYBRID = "hybrid"
|
31 |
+
|
32 |
+
@dataclass
class AnalogicalPattern:
    """Represents a pattern for analogical matching."""
    id: str                               # unique pattern identifier
    level: AnalogicalLevel                # depth of similarity this pattern captures
    features: Dict[str, Any]              # parsed features, or {"raw": text} when not valid JSON
    relations: List[Tuple[str, str, str]]  # (entity1, relation, entity2)
    constraints: List[str]                # free-form constraints on applicability
    metadata: Dict[str, Any] = field(default_factory=dict)  # optional extra info
|
41 |
+
|
42 |
+
@dataclass
class AnalogicalMapping:
    """Represents a mapping between source and target domains."""
    id: str                                        # unique mapping identifier
    type: MappingType                              # how the two domains are related
    source_elements: Dict[str, Any]                # elements of the source domain
    target_elements: Dict[str, Any]                # elements of the target domain
    correspondences: List[Tuple[str, str, float]]  # (source, target, strength)
    transformations: List[Dict[str, Any]]          # adaptations needed to transfer
    confidence: float                              # strength of the whole mapping
    metadata: Dict[str, Any] = field(default_factory=dict)  # optional extra info
|
53 |
+
|
54 |
+
@dataclass
class AnalogicalSolution:
    """Represents a solution derived through analogical reasoning."""
    id: str                     # unique solution identifier
    source_analogy: str         # which source analogy produced this solution
    mapping: AnalogicalMapping  # the mapping the solution is built on
    adaptation: Dict[str, Any]  # adaptations applied during transfer
    inference: Dict[str, Any]   # inferred result; the "conclusion" key is the final answer
    confidence: float           # overall confidence in the solution
    validation: Dict[str, Any]  # validation criteria / outcomes
    metadata: Dict[str, Any] = field(default_factory=dict)  # reasoning trace, meta insights, ...
|
65 |
+
|
66 |
+
class AnalogicalReasoning(ReasoningStrategy):
|
67 |
+
"""
|
68 |
+
Advanced Analogical Reasoning implementation with:
|
69 |
+
- Multi-level pattern matching
|
70 |
+
- Sophisticated similarity metrics
|
71 |
+
- Transfer learning capabilities
|
72 |
+
- Dynamic adaptation mechanisms
|
73 |
+
- Quality assessment
|
74 |
+
- Learning from experience
|
75 |
+
"""
|
76 |
+
|
77 |
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
    """Initialize analogical reasoning.

    Args:
        config: Optional settings dict; recognized keys are
            'min_confidence', 'parallel_threshold', 'learning_rate',
            'strategy_weights', 'min_similarity', 'max_candidates'
            and 'adaptation_threshold'.
    """
    super().__init__()
    self.config = config or {}

    # Standard reasoning parameters
    self.min_confidence = self.config.get('min_confidence', 0.7)
    self.parallel_threshold = self.config.get('parallel_threshold', 3)
    self.learning_rate = self.config.get('learning_rate', 0.1)
    self.strategy_weights = self.config.get('strategy_weights', {
        "LOCAL_LLM": 0.8,
        "CHAIN_OF_THOUGHT": 0.6,
        "TREE_OF_THOUGHTS": 0.5,
        "META_LEARNING": 0.4
    })

    # Analogical reasoning specific parameters
    self.min_similarity = self.config.get('min_similarity', 0.6)
    self.max_candidates = self.config.get('max_candidates', 5)
    self.adaptation_threshold = self.config.get('adaptation_threshold', 0.7)

    # Knowledge base
    self.patterns: Dict[str, AnalogicalPattern] = {}
    self.mappings: Dict[str, AnalogicalMapping] = {}
    self.solutions: Dict[str, AnalogicalSolution] = {}

    # Learning components
    self.pattern_weights: Dict[str, float] = defaultdict(float)  # pattern id -> learned weight
    self.success_history: List[Dict[str, Any]] = []              # per-solution outcome records
    self.adaptation_history: List[Dict[str, Any]] = []           # per-solution adaptation records
|
107 |
+
|
108 |
+
async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
    """Main reasoning method implementing analogical reasoning.

    Pipeline: extract patterns -> find matches -> build mappings ->
    generate candidate solutions -> select the best one -> learn from it.

    Returns on success: {"success": True, "answer", "confidence",
    "analogy", "reasoning_trace", "meta_insights"}; on any exception:
    {"success": False, "error": <message>}.
    """
    try:
        # Extract patterns from query
        patterns = await self._extract_patterns(query, context)

        # Find analogical matches
        matches = await self._find_matches(patterns, context)

        # Create and evaluate mappings
        mappings = await self._create_mappings(matches, context)

        # Generate and adapt solutions
        solutions = await self._generate_solutions(mappings, context)

        # Select best solution
        # NOTE(review): an empty solutions list would raise inside
        # _select_best_solution (max() on empty sequence) and end up in the
        # error branch below -- confirm that is acceptable.
        best_solution = await self._select_best_solution(solutions, context)

        # Learn from experience
        self._update_knowledge(patterns, mappings, best_solution)

        return {
            "success": True,
            "answer": best_solution.inference["conclusion"],
            "confidence": best_solution.confidence,
            "analogy": {
                "source": best_solution.source_analogy,
                "mapping": self._mapping_to_dict(best_solution.mapping),
                "adaptation": best_solution.adaptation
            },
            "reasoning_trace": best_solution.metadata.get("reasoning_trace", []),
            "meta_insights": best_solution.metadata.get("meta_insights", [])
        }
    except Exception as e:
        logging.error(f"Error in analogical reasoning: {str(e)}")
        return {"success": False, "error": str(e)}
|
144 |
+
|
145 |
+
async def _extract_patterns(self, query: str, context: Dict[str, Any]) -> List[AnalogicalPattern]:
    """Extract patterns from query for analogical matching.

    Prompts the LLM behind context["groq_api"] for patterns at each
    analogical level and parses the reply with _parse_patterns.
    """
    prompt = f"""
    Extract analogical patterns from query:
    Query: {query}
    Context: {json.dumps(context)}

    For each pattern level:
    1. Surface features
    2. Structural relations
    3. Semantic concepts
    4. Functional roles
    5. Causal relationships
    6. Abstract principles

    Format as:
    [P1]
    Level: ...
    Features: ...
    Relations: ...
    Constraints: ...

    [P2]
    ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_patterns(response["answer"])
|
173 |
+
|
174 |
+
async def _find_matches(self, patterns: List[AnalogicalPattern], context: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Find matching patterns in knowledge base.

    Serializes the extracted patterns into a prompt, asks the LLM behind
    context["groq_api"] for candidate source-domain analogies, and parses
    the reply with _parse_matches.
    """
    prompt = f"""
    Find analogical matches:
    Patterns: {json.dumps([self._pattern_to_dict(p) for p in patterns])}
    Context: {json.dumps(context)}

    For each match provide:
    1. Source domain
    2. Similarity assessment
    3. Key correspondences
    4. Transfer potential

    Format as:
    [M1]
    Source: ...
    Similarity: ...
    Correspondences: ...
    Transfer: ...

    [M2]
    ...
    """

    # NOTE(review): _pattern_to_dict is not visible in this chunk -- confirm
    # it is defined later in the class.
    response = await context["groq_api"].predict(prompt)
    return self._parse_matches(response["answer"])
|
200 |
+
|
201 |
+
async def _create_mappings(self, matches: List[Dict[str, Any]], context: Dict[str, Any]) -> List[AnalogicalMapping]:
    """Create mappings between source and target domains.

    Prompts the LLM with the candidate matches and the allowed MappingType
    values, then parses the reply with _parse_mappings.
    """
    prompt = f"""
    Create analogical mappings:
    Matches: {json.dumps(matches)}
    Context: {json.dumps(context)}

    For each mapping specify:
    1. [Type]: {" | ".join([t.value for t in MappingType])}
    2. [Elements]: Source and target elements
    3. [Correspondences]: Element mappings
    4. [Transformations]: Required adaptations
    5. [Confidence]: Mapping strength

    Format as:
    [Map1]
    Type: ...
    Elements: ...
    Correspondences: ...
    Transformations: ...
    Confidence: ...
    """

    # NOTE(review): _parse_mappings is not visible in this chunk -- confirm
    # it is defined later in the class.
    response = await context["groq_api"].predict(prompt)
    return self._parse_mappings(response["answer"])
|
226 |
+
|
227 |
+
async def _generate_solutions(self, mappings: List[AnalogicalMapping], context: Dict[str, Any]) -> List[AnalogicalSolution]:
    """Generate solutions through analogical transfer.

    Prompts the LLM with the serialized mappings and parses the reply into
    AnalogicalSolution objects via _parse_solutions.
    """
    prompt = f"""
    Generate analogical solutions:
    Mappings: {json.dumps([self._mapping_to_dict(m) for m in mappings])}
    Context: {json.dumps(context)}

    For each solution provide:
    1. Analogical inference
    2. Required adaptations
    3. Validation criteria
    4. Confidence assessment
    5. Reasoning trace

    Format as:
    [S1]
    Inference: ...
    Adaptation: ...
    Validation: ...
    Confidence: ...
    Trace: ...
    """

    # NOTE(review): _mapping_to_dict and _parse_solutions are not visible in
    # this chunk -- confirm they are defined later in the class.
    response = await context["groq_api"].predict(prompt)
    return self._parse_solutions(response["answer"], mappings)
|
252 |
+
|
253 |
+
async def _select_best_solution(self, solutions: List[AnalogicalSolution], context: Dict[str, Any]) -> AnalogicalSolution:
    """Select the best solution based on multiple criteria.

    Asks the LLM to rank the candidates; defaults to the highest-confidence
    solution and only overrides it when the LLM's selected id matches one
    of the candidates.
    """
    prompt = f"""
    Evaluate and select best solution:
    Solutions: {json.dumps([self._solution_to_dict(s) for s in solutions])}
    Context: {json.dumps(context)}

    Evaluate based on:
    1. Inference quality
    2. Adaptation feasibility
    3. Validation strength
    4. Overall confidence

    Format as:
    [Evaluation]
    Rankings: ...
    Rationale: ...
    Selection: ...
    Confidence: ...
    """

    response = await context["groq_api"].predict(prompt)
    # NOTE(review): _parse_selection and _solution_to_dict are not visible in
    # this chunk -- confirm they are defined later in the class.
    selection = self._parse_selection(response["answer"])

    # Find selected solution
    # Fall back to the most confident candidate, then honor the LLM's
    # explicit choice when its id matches one of the candidates.
    selected = max(solutions, key=lambda s: s.confidence)
    for solution in solutions:
        if solution.id == selection.get("selected_id"):
            selected = solution
            break

    return selected
|
285 |
+
|
286 |
+
def _update_knowledge(self, patterns: List[AnalogicalPattern], mappings: List[AnalogicalMapping], solution: AnalogicalSolution):
    """Update knowledge base with new patterns and successful mappings.

    NOTE(review): the ``mappings`` parameter is unused -- only
    ``solution.mapping`` is stored. Confirm whether all candidate mappings
    were meant to be recorded.
    """
    # Update patterns
    for pattern in patterns:
        if pattern.id not in self.patterns:
            self.patterns[pattern.id] = pattern
        # Reinforce each pattern in proportion to the final solution's
        # confidence (simple reward-weighted update).
        self.pattern_weights[pattern.id] += self.learning_rate * solution.confidence

    # Update mappings
    if solution.mapping.id not in self.mappings:
        self.mappings[solution.mapping.id] = solution.mapping

    # Record solution
    self.solutions[solution.id] = solution

    # Update history
    self.success_history.append({
        "timestamp": datetime.now().isoformat(),
        "solution_id": solution.id,
        "confidence": solution.confidence,
        "patterns": [p.id for p in patterns],
        "mapping_type": solution.mapping.type.value
    })

    # Update adaptation history
    self.adaptation_history.append({
        "timestamp": datetime.now().isoformat(),
        "solution_id": solution.id,
        "adaptations": solution.adaptation,
        "success": solution.confidence >= self.adaptation_threshold
    })
|
317 |
+
|
318 |
+
def _parse_patterns(self, response: str) -> List[AnalogicalPattern]:
|
319 |
+
"""Parse patterns from response."""
|
320 |
+
patterns = []
|
321 |
+
current = None
|
322 |
+
|
323 |
+
for line in response.split('\n'):
|
324 |
+
line = line.strip()
|
325 |
+
if not line:
|
326 |
+
continue
|
327 |
+
|
328 |
+
if line.startswith('[P'):
|
329 |
+
if current:
|
330 |
+
patterns.append(current)
|
331 |
+
current = None
|
332 |
+
elif line.startswith('Level:'):
|
333 |
+
level_str = line[6:].strip().lower()
|
334 |
+
try:
|
335 |
+
level = AnalogicalLevel(level_str)
|
336 |
+
current = AnalogicalPattern(
|
337 |
+
id=f"pattern_{len(patterns)}",
|
338 |
+
level=level,
|
339 |
+
features={},
|
340 |
+
relations=[],
|
341 |
+
constraints=[],
|
342 |
+
metadata={}
|
343 |
+
)
|
344 |
+
except ValueError:
|
345 |
+
logging.warning(f"Invalid analogical level: {level_str}")
|
346 |
+
elif current:
|
347 |
+
if line.startswith('Features:'):
|
348 |
+
try:
|
349 |
+
current.features = json.loads(line[9:].strip())
|
350 |
+
except:
|
351 |
+
current.features = {"raw": line[9:].strip()}
|
352 |
+
elif line.startswith('Relations:'):
|
353 |
+
relations = [r.strip() for r in line[10:].split(',')]
|
354 |
+
current.relations = [(r.split()[0], r.split()[1], r.split()[2])
|
355 |
+
for r in relations if len(r.split()) >= 3]
|
356 |
+
elif line.startswith('Constraints:'):
|
357 |
+
current.constraints = [c.strip() for c in line[12:].split(',')]
|
358 |
+
|
359 |
+
if current:
|
360 |
+
patterns.append(current)
|
361 |
+
|
362 |
+
return patterns
|
363 |
+
|
364 |
+
def _parse_matches(self, response: str) -> List[Dict[str, Any]]:
|
365 |
+
"""Parse matches from response."""
|
366 |
+
matches = []
|
367 |
+
current = None
|
368 |
+
|
369 |
+
for line in response.split('\n'):
|
370 |
+
line = line.strip()
|
371 |
+
if not line:
|
372 |
+
continue
|
373 |
+
|
374 |
+
if line.startswith('[M'):
|
375 |
+
if current:
|
376 |
+
matches.append(current)
|
377 |
+
current = {
|
378 |
+
"source": "",
|
379 |
+
"similarity": 0.0,
|
380 |
+
"correspondences": [],
|
381 |
+
"transfer": []
|
382 |
+
}
|
383 |
+
elif current:
|
384 |
+
if line.startswith('Source:'):
|
385 |
+
current["source"] = line[7:].strip()
|
386 |
+
elif line.startswith('Similarity:'):
|
387 |
+
try:
|
388 |
+
current["similarity"] = float(line[11:].strip())
|
389 |
+
except:
|
390 |
+
pass
|
391 |
+
elif line.startswith('Correspondences:'):
|
392 |
+
current["correspondences"] = [c.strip() for c in line[16:].split(',')]
|
393 |
+
elif line.startswith('Transfer:'):
|
394 |
+
current["transfer"] = [t.strip() for t in line[9:].split(',')]
|
395 |
+
|
396 |
+
if current:
|
397 |
+
matches.append(current)
|
398 |
+
|
399 |
+
return matches
|
400 |
+
|
401 |
+
def _parse_mappings(self, response: str) -> List[AnalogicalMapping]:
|
402 |
+
"""Parse mappings from response."""
|
403 |
+
mappings = []
|
404 |
+
current = None
|
405 |
+
|
406 |
+
for line in response.split('\n'):
|
407 |
+
line = line.strip()
|
408 |
+
if not line:
|
409 |
+
continue
|
410 |
+
|
411 |
+
if line.startswith('[Map'):
|
412 |
+
if current:
|
413 |
+
mappings.append(current)
|
414 |
+
current = None
|
415 |
+
elif line.startswith('Type:'):
|
416 |
+
type_str = line[5:].strip().lower()
|
417 |
+
try:
|
418 |
+
mapping_type = MappingType(type_str)
|
419 |
+
current = AnalogicalMapping(
|
420 |
+
id=f"mapping_{len(mappings)}",
|
421 |
+
type=mapping_type,
|
422 |
+
source_elements={},
|
423 |
+
target_elements={},
|
424 |
+
correspondences=[],
|
425 |
+
transformations=[],
|
426 |
+
confidence=0.0,
|
427 |
+
metadata={}
|
428 |
+
)
|
429 |
+
except ValueError:
|
430 |
+
logging.warning(f"Invalid mapping type: {type_str}")
|
431 |
+
elif current:
|
432 |
+
if line.startswith('Elements:'):
|
433 |
+
try:
|
434 |
+
elements = json.loads(line[9:].strip())
|
435 |
+
current.source_elements = elements.get("source", {})
|
436 |
+
current.target_elements = elements.get("target", {})
|
437 |
+
except:
|
438 |
+
pass
|
439 |
+
elif line.startswith('Correspondences:'):
|
440 |
+
pairs = [c.strip() for c in line[16:].split(',')]
|
441 |
+
for pair in pairs:
|
442 |
+
parts = pair.split(':')
|
443 |
+
if len(parts) >= 2:
|
444 |
+
source = parts[0].strip()
|
445 |
+
target = parts[1].strip()
|
446 |
+
strength = float(parts[2]) if len(parts) > 2 else 1.0
|
447 |
+
current.correspondences.append((source, target, strength))
|
448 |
+
elif line.startswith('Transformations:'):
|
449 |
+
try:
|
450 |
+
current.transformations = json.loads(line[16:].strip())
|
451 |
+
except:
|
452 |
+
current.transformations = [{"raw": line[16:].strip()}]
|
453 |
+
elif line.startswith('Confidence:'):
|
454 |
+
try:
|
455 |
+
current.confidence = float(line[11:].strip())
|
456 |
+
except:
|
457 |
+
pass
|
458 |
+
|
459 |
+
if current:
|
460 |
+
mappings.append(current)
|
461 |
+
|
462 |
+
return mappings
|
463 |
+
|
464 |
+
def _parse_solutions(self, response: str, mappings: List[AnalogicalMapping]) -> List[AnalogicalSolution]:
|
465 |
+
"""Parse solutions from response."""
|
466 |
+
solutions = []
|
467 |
+
current = None
|
468 |
+
|
469 |
+
for line in response.split('\n'):
|
470 |
+
line = line.strip()
|
471 |
+
if not line:
|
472 |
+
continue
|
473 |
+
|
474 |
+
if line.startswith('[S'):
|
475 |
+
if current:
|
476 |
+
solutions.append(current)
|
477 |
+
current = None
|
478 |
+
mapping_idx = len(solutions)
|
479 |
+
if mapping_idx < len(mappings):
|
480 |
+
current = AnalogicalSolution(
|
481 |
+
id=f"solution_{len(solutions)}",
|
482 |
+
source_analogy="",
|
483 |
+
mapping=mappings[mapping_idx],
|
484 |
+
adaptation={},
|
485 |
+
inference={},
|
486 |
+
confidence=0.0,
|
487 |
+
validation={},
|
488 |
+
metadata={}
|
489 |
+
)
|
490 |
+
elif current:
|
491 |
+
if line.startswith('Inference:'):
|
492 |
+
try:
|
493 |
+
current.inference = json.loads(line[10:].strip())
|
494 |
+
except:
|
495 |
+
current.inference = {"conclusion": line[10:].strip()}
|
496 |
+
elif line.startswith('Adaptation:'):
|
497 |
+
try:
|
498 |
+
current.adaptation = json.loads(line[11:].strip())
|
499 |
+
except:
|
500 |
+
current.adaptation = {"steps": [line[11:].strip()]}
|
501 |
+
elif line.startswith('Validation:'):
|
502 |
+
try:
|
503 |
+
current.validation = json.loads(line[11:].strip())
|
504 |
+
except:
|
505 |
+
current.validation = {"criteria": [line[11:].strip()]}
|
506 |
+
elif line.startswith('Confidence:'):
|
507 |
+
try:
|
508 |
+
current.confidence = float(line[11:].strip())
|
509 |
+
except:
|
510 |
+
pass
|
511 |
+
elif line.startswith('Trace:'):
|
512 |
+
current.metadata["reasoning_trace"] = [t.strip() for t in line[6:].split(',')]
|
513 |
+
|
514 |
+
if current:
|
515 |
+
solutions.append(current)
|
516 |
+
|
517 |
+
return solutions
|
518 |
+
|
519 |
+
def _parse_selection(self, response: str) -> Dict[str, Any]:
|
520 |
+
"""Parse solution selection from response."""
|
521 |
+
selection = {
|
522 |
+
"selected_id": None,
|
523 |
+
"confidence": 0.0,
|
524 |
+
"rationale": []
|
525 |
+
}
|
526 |
+
|
527 |
+
for line in response.split('\n'):
|
528 |
+
line = line.strip()
|
529 |
+
if line.startswith('Selection:'):
|
530 |
+
selection["selected_id"] = line[10:].strip()
|
531 |
+
elif line.startswith('Confidence:'):
|
532 |
+
try:
|
533 |
+
selection["confidence"] = float(line[11:].strip())
|
534 |
+
except:
|
535 |
+
pass
|
536 |
+
elif line.startswith('Rationale:'):
|
537 |
+
selection["rationale"] = [r.strip() for r in line[10:].split(',')]
|
538 |
+
|
539 |
+
return selection
|
540 |
+
|
541 |
+
def _pattern_to_dict(self, pattern: AnalogicalPattern) -> Dict[str, Any]:
|
542 |
+
"""Convert pattern to dictionary for serialization."""
|
543 |
+
return {
|
544 |
+
"id": pattern.id,
|
545 |
+
"level": pattern.level.value,
|
546 |
+
"features": pattern.features,
|
547 |
+
"relations": pattern.relations,
|
548 |
+
"constraints": pattern.constraints,
|
549 |
+
"metadata": pattern.metadata
|
550 |
+
}
|
551 |
+
|
552 |
+
def _mapping_to_dict(self, mapping: AnalogicalMapping) -> Dict[str, Any]:
|
553 |
+
"""Convert mapping to dictionary for serialization."""
|
554 |
+
return {
|
555 |
+
"id": mapping.id,
|
556 |
+
"type": mapping.type.value,
|
557 |
+
"source_elements": mapping.source_elements,
|
558 |
+
"target_elements": mapping.target_elements,
|
559 |
+
"correspondences": mapping.correspondences,
|
560 |
+
"transformations": mapping.transformations,
|
561 |
+
"confidence": mapping.confidence,
|
562 |
+
"metadata": mapping.metadata
|
563 |
+
}
|
564 |
+
|
565 |
+
def _solution_to_dict(self, solution: AnalogicalSolution) -> Dict[str, Any]:
|
566 |
+
"""Convert solution to dictionary for serialization."""
|
567 |
+
return {
|
568 |
+
"id": solution.id,
|
569 |
+
"source_analogy": solution.source_analogy,
|
570 |
+
"mapping": self._mapping_to_dict(solution.mapping),
|
571 |
+
"adaptation": solution.adaptation,
|
572 |
+
"inference": solution.inference,
|
573 |
+
"confidence": solution.confidence,
|
574 |
+
"validation": solution.validation,
|
575 |
+
"metadata": solution.metadata
|
576 |
+
}
|
577 |
+
|
578 |
+
def get_pattern_statistics(self) -> Dict[str, Any]:
    """Get statistics about pattern usage and effectiveness.

    Returns:
        Dict with total pattern count, per-level occurrence counts,
        average number of constraints per pattern, and current weights.
    """
    # Fixed: the original built the distribution with a dict comprehension
    # ({p.level.value: 1 for ...}), which overwrote duplicate keys so every
    # level appeared to occur exactly once. Count occurrences instead.
    level_distribution = defaultdict(int)
    for pattern in self.patterns.values():
        level_distribution[pattern.level.value] += 1

    total = len(self.patterns)
    return {
        "total_patterns": total,
        "level_distribution": level_distribution,
        "average_constraints": sum(len(p.constraints) for p in self.patterns.values()) / total if total else 0,
        "pattern_weights": dict(self.pattern_weights)
    }
|
586 |
+
|
587 |
+
def get_mapping_statistics(self) -> Dict[str, Any]:
    """Get statistics about mapping effectiveness.

    Returns:
        Dict with total mapping count, per-type occurrence counts,
        average confidence, and transformation counts per mapping id.
    """
    # Fixed: the original used {m.type.value: 1 for ...}, which overwrote
    # duplicate keys so every type appeared exactly once. Count instead.
    type_distribution = defaultdict(int)
    transformation_counts = defaultdict(int)
    for mapping in self.mappings.values():
        type_distribution[mapping.type.value] += 1
        transformation_counts[mapping.id] = len(mapping.transformations)

    total = len(self.mappings)
    return {
        "total_mappings": total,
        "type_distribution": type_distribution,
        "average_confidence": sum(m.confidence for m in self.mappings.values()) / total if total else 0,
        "transformation_counts": transformation_counts
    }
|
595 |
+
|
596 |
+
def get_solution_statistics(self) -> Dict[str, Any]:
    """Get statistics about solution quality."""
    solution_count = len(self.solutions)
    history_count = len(self.adaptation_history)

    if solution_count:
        mean_confidence = sum(s.confidence for s in self.solutions.values()) / solution_count
    else:
        mean_confidence = 0

    if history_count:
        success_rate = sum(1 for h in self.adaptation_history if h["success"]) / history_count
    else:
        success_rate = 0

    return {
        "total_solutions": solution_count,
        "average_confidence": mean_confidence,
        "adaptation_success_rate": success_rate
    }
|
603 |
+
|
604 |
+
def clear_knowledge_base(self):
    """Reset all learned state: patterns, mappings, solutions and histories."""
    stores = (
        self.patterns,
        self.mappings,
        self.solutions,
        self.pattern_weights,
        self.success_history,
        self.adaptation_history,
    )
    for store in stores:
        store.clear()
|
reasoning/base.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Base class for all reasoning strategies."""
|
2 |
+
|
3 |
+
from typing import Dict, Any
|
4 |
+
|
5 |
+
class ReasoningStrategy:
    """Abstract interface shared by every reasoning strategy.

    Concrete strategies override :meth:`reason`; the base implementation
    only signals that no behaviour has been provided.
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Apply this strategy to *query* using *context*.

        Args:
            query: The query to reason about.
            context: Additional context for reasoning.

        Returns:
            Dictionary containing reasoning results.
        """
        raise NotImplementedError
|
reasoning/bayesian.py
ADDED
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced Bayesian reasoning for probabilistic analysis."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
class BayesianHypothesis:
    """Bayesian hypothesis with probabilities.

    Tracks a single hypothesis through the Bayesian update cycle.
    """
    # Human-readable hypothesis identifier.
    name: str
    # P(H): belief before seeing evidence.
    prior: float
    # P(E|H): probability of the observed evidence under this hypothesis.
    likelihood: float
    # P(H|E): belief after updating; defaults to 0.0 until computed.
    posterior: float = 0.0
    # Evidence records considered for this hypothesis.
    evidence: List[Dict[str, Any]] = field(default_factory=list)
|
22 |
+
|
23 |
+
class BayesianReasoning(ReasoningStrategy):
    """
    Advanced Bayesian reasoning that:
    1. Generates hypotheses
    2. Calculates prior probabilities
    3. Updates with evidence
    4. Computes posteriors
    5. Provides probabilistic analysis
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize Bayesian reasoning.

        Args:
            config: Optional overrides for 'prior_weight',
                'evidence_threshold' and 'min_likelihood'.
        """
        super().__init__()
        self.config = config or {}

        # Configure Bayesian parameters
        self.prior_weight = self.config.get('prior_weight', 0.3)
        self.evidence_threshold = self.config.get('evidence_threshold', 0.1)
        # Floor for likelihoods so one unmatched piece of evidence cannot
        # zero a hypothesis out entirely.
        self.min_likelihood = self.config.get('min_likelihood', 0.01)

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Apply Bayesian reasoning to analyze probabilities and update beliefs.

        Args:
            query: The input query to reason about
            context: Additional context and parameters

        Returns:
            Dict containing reasoning results and confidence scores
        """
        try:
            # Generate hypotheses
            hypotheses = await self._generate_hypotheses(query, context)

            # Calculate priors
            priors = await self._calculate_priors(hypotheses, context)

            # Update with evidence
            posteriors = await self._update_with_evidence(
                hypotheses,
                priors,
                context
            )

            # Generate analysis
            analysis = await self._generate_analysis(posteriors, context)

            return {
                'answer': self._format_analysis(analysis),
                'confidence': self._calculate_confidence(posteriors),
                'hypotheses': hypotheses,
                'priors': priors,
                'posteriors': posteriors,
                'analysis': analysis
            }

        except Exception as e:
            logging.error(f"Bayesian reasoning failed: {str(e)}")
            return {
                'error': f"Bayesian reasoning failed: {str(e)}",
                'confidence': 0.0
            }

    async def _generate_hypotheses(
        self,
        query: str,
        context: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """Generate plausible hypotheses.

        Uses the caller-supplied ``context['options']`` when present;
        otherwise falls back to a primary/alternative pair derived from
        the query terms.
        """
        hypotheses = []

        # Extract key terms for hypothesis generation
        terms = set(query.lower().split())

        # Generate hypotheses based on context and terms
        if 'options' in context:
            # Use provided options as hypotheses
            for option in context['options']:
                hypotheses.append({
                    'name': option,
                    'description': f"Hypothesis based on option: {option}",
                    'factors': self._extract_factors(option, terms)
                })
        else:
            # Generate default hypotheses
            hypotheses.extend([
                {
                    'name': 'primary',
                    'description': "Primary hypothesis based on direct interpretation",
                    'factors': self._extract_factors(query, terms)
                },
                {
                    'name': 'alternative',
                    'description': "Alternative hypothesis considering other factors",
                    'factors': self._generate_alternative_factors(terms)
                }
            ])

        return hypotheses

    async def _calculate_priors(
        self,
        hypotheses: List[Dict[str, Any]],
        context: Dict[str, Any]
    ) -> Dict[str, float]:
        """Calculate prior probabilities.

        Blends historical frequencies (from ``context['history']``, when
        available) with a factor-count heuristic, then normalizes so the
        priors sum to 1.
        """
        priors = {}

        # Get historical data if available
        history = context.get('history', {})
        total_cases = sum(history.values()) if history else len(hypotheses)

        for hypothesis in hypotheses:
            name = hypothesis['name']

            # Calculate prior from history or use uniform prior
            if name in history:
                priors[name] = history[name] / total_cases
            else:
                priors[name] = 1.0 / len(hypotheses)

            # Adjust prior based on factors
            factor_weight = len(hypothesis['factors']) / 10  # Normalize factor count
            priors[name] = (
                priors[name] * (1 - self.prior_weight) +
                factor_weight * self.prior_weight
            )

        # Normalize priors
        total_prior = sum(priors.values())
        if total_prior > 0:
            priors = {
                name: prob / total_prior
                for name, prob in priors.items()
            }

        return priors

    async def _update_with_evidence(
        self,
        hypotheses: List[Dict[str, Any]],
        priors: Dict[str, float],
        context: Dict[str, Any]
    ) -> Dict[str, float]:
        """Update probabilities with evidence via sequential Bayes updates."""
        posteriors = priors.copy()

        # Get evidence from context
        evidence = context.get('evidence', [])
        if not evidence:
            return posteriors

        for e in evidence:
            # Calculate likelihood for each hypothesis
            likelihoods = {}
            for hypothesis in hypotheses:
                name = hypothesis['name']
                likelihood = self._calculate_likelihood(hypothesis, e)
                # Clamp to the configured floor to avoid zeroing out.
                likelihoods[name] = max(likelihood, self.min_likelihood)

            # Update posteriors using Bayes' rule
            total_probability = sum(
                likelihoods[name] * posteriors[name]
                for name in posteriors
            )

            if total_probability > 0:
                posteriors = {
                    name: (likelihoods[name] * posteriors[name]) / total_probability
                    for name in posteriors
                }

        return posteriors

    def _calculate_likelihood(
        self,
        hypothesis: Dict[str, Any],
        evidence: Dict[str, Any]
    ) -> float:
        """Calculate likelihood of evidence given hypothesis.

        Approximated as the fraction of evidence factors that overlap
        with the hypothesis factors.
        """
        # Extract evidence factors
        evidence_factors = set(
            str(v).lower()
            for v in evidence.values()
            if isinstance(v, (str, int, float))
        )

        # Compare with hypothesis factors
        common_factors = evidence_factors.intersection(hypothesis['factors'])

        if not evidence_factors:
            return 0.5  # Neutral likelihood if no factors

        return len(common_factors) / len(evidence_factors)

    async def _generate_analysis(
        self,
        posteriors: Dict[str, float],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Generate probabilistic analysis."""
        # Sort hypotheses by posterior probability
        ranked_hypotheses = sorted(
            posteriors.items(),
            key=lambda x: x[1],
            reverse=True
        )

        # Calculate statistics
        mean = np.mean(list(posteriors.values()))
        std = np.std(list(posteriors.values()))
        # Shannon entropy of the posterior distribution (bits).
        entropy = -sum(
            p * np.log2(p) if p > 0 else 0
            for p in posteriors.values()
        )

        return {
            'top_hypothesis': ranked_hypotheses[0][0],
            'probability': ranked_hypotheses[0][1],
            'alternatives': [
                {'name': name, 'probability': prob}
                for name, prob in ranked_hypotheses[1:]
            ],
            'statistics': {
                'mean': mean,
                'std': std,
                'entropy': entropy
            }
        }

    def _format_analysis(self, analysis: Dict[str, Any]) -> str:
        """Format analysis into readable text."""
        sections = []

        # Top hypothesis
        sections.append(
            f"Most likely hypothesis: {analysis['top_hypothesis']} "
            f"(probability: {analysis['probability']:.2%})"
        )

        # Alternative hypotheses
        if analysis['alternatives']:
            sections.append("\nAlternative hypotheses:")
            for alt in analysis['alternatives']:
                sections.append(
                    f"- {alt['name']}: {alt['probability']:.2%}"
                )

        # Statistics
        stats = analysis['statistics']
        sections.append("\nDistribution statistics:")
        sections.append(f"- Mean probability: {stats['mean']:.2%}")
        sections.append(f"- Standard deviation: {stats['std']:.2%}")
        sections.append(f"- Entropy: {stats['entropy']:.2f} bits")

        return "\n".join(sections)

    def _calculate_confidence(self, posteriors: Dict[str, float]) -> float:
        """Calculate overall confidence score.

        Starts at 0.5 and rewards both a strong leading hypothesis and a
        low-entropy (clearly separated) distribution; capped at 1.0.
        """
        if not posteriors:
            return 0.0

        # Base confidence
        confidence = 0.5

        # Adjust based on probability distribution
        probs = list(posteriors.values())

        # Strong leading hypothesis increases confidence
        max_prob = max(probs)
        if max_prob > 0.8:
            confidence += 0.3
        elif max_prob > 0.6:
            confidence += 0.2
        elif max_prob > 0.4:
            confidence += 0.1

        # Low entropy (clear distinction) increases confidence
        entropy = -sum(p * np.log2(p) if p > 0 else 0 for p in probs)
        max_entropy = -np.log2(1 / len(probs))  # Maximum possible entropy

        if entropy < 0.3 * max_entropy:
            confidence += 0.2
        elif entropy < 0.6 * max_entropy:
            confidence += 0.1

        return min(confidence, 1.0)

    def _extract_factors(self, text: str, terms: Set[str]) -> Set[str]:
        """Extract relevant factors from text (words also present in terms)."""
        return set(word.lower() for word in text.split() if word.lower() in terms)

    def _generate_alternative_factors(self, terms: Set[str]) -> Set[str]:
        """Generate factors for alternative hypothesis.

        Keeps only terms that are not a substring of (or contain) any
        *other* term.
        """
        # Fixed: the original compared each word against every term
        # INCLUDING ITSELF, so `word in word` was always true and the
        # result was always the empty set. Exclude the word itself.
        return set(
            word for word in terms
            if not any(
                similar in word or word in similar
                for similar in terms
                if similar != word
            )
        )
|
reasoning/chain_of_thought.py
ADDED
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Chain of Thought reasoning implementation with advanced features."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from enum import Enum
|
8 |
+
|
9 |
+
from .base import ReasoningStrategy
|
10 |
+
|
11 |
+
class ThoughtType(Enum):
    """Types of thoughts in the chain.

    The string values are embedded verbatim into LLM prompts and parsed
    back from responses, so they must stay stable.
    """
    OBSERVATION = "observation"
    ANALYSIS = "analysis"
    HYPOTHESIS = "hypothesis"
    VERIFICATION = "verification"
    CONCLUSION = "conclusion"
    REFLECTION = "reflection"
    REFINEMENT = "refinement"
|
20 |
+
|
21 |
+
@dataclass
class Thought:
    """Represents a single thought in the chain."""
    # Category of the thought (observation, analysis, ...).
    type: ThoughtType
    # Main textual content of the thought.
    content: str
    # Confidence score, expected in [0, 1].
    confidence: float
    # Supporting evidence snippets.
    evidence: List[str]
    # Alternative perspectives considered.
    alternatives: List[str]
    # Suggested follow-up steps.
    next_steps: List[str]
    # Free-form extra data (e.g. reasoning traces).
    metadata: Dict[str, Any]
|
31 |
+
|
32 |
+
class ChainOfThoughtStrategy(ReasoningStrategy):
|
33 |
+
"""
|
34 |
+
Advanced Chain of Thought reasoning implementation with:
|
35 |
+
- Hierarchical thought chains
|
36 |
+
- Confidence scoring
|
37 |
+
- Alternative path exploration
|
38 |
+
- Self-reflection and refinement
|
39 |
+
- Evidence tracking
|
40 |
+
- Meta-learning capabilities
|
41 |
+
"""
|
42 |
+
|
43 |
+
def __init__(self,
|
44 |
+
min_confidence: float = 0.7,
|
45 |
+
parallel_threshold: int = 3,
|
46 |
+
learning_rate: float = 0.1,
|
47 |
+
strategy_weights: Optional[Dict[str, float]] = None):
|
48 |
+
self.min_confidence = min_confidence
|
49 |
+
self.parallel_threshold = parallel_threshold
|
50 |
+
self.learning_rate = learning_rate
|
51 |
+
self.strategy_weights = strategy_weights or {
|
52 |
+
"LOCAL_LLM": 0.8,
|
53 |
+
"CHAIN_OF_THOUGHT": 0.6,
|
54 |
+
"TREE_OF_THOUGHTS": 0.5,
|
55 |
+
"META_LEARNING": 0.4
|
56 |
+
}
|
57 |
+
self.thought_history: List[Thought] = []
|
58 |
+
|
59 |
+
async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
    """Main reasoning method implementing chain of thought.

    Pipeline: initialize chain -> generate candidate thoughts -> build a
    coherent chain -> optionally reflect/refine -> extract conclusion.
    Returns a dict with ``success`` plus either the conclusion fields or
    an ``error`` message; never raises.
    """
    try:
        # Initialize reasoning chain
        # NOTE(review): this initial chain is immediately overwritten by
        # the result of _build_chain below — confirm whether the initial
        # observations were meant to feed into chain construction.
        chain = await self._initialize_chain(query, context)

        # Generate initial thoughts
        thoughts = await self._generate_thoughts(query, context)

        # Build thought chain
        chain = await self._build_chain(thoughts, context)

        # Reflect and refine
        # NOTE(review): `enable_reflection` is not assigned anywhere in
        # the visible code — verify it is set (e.g. in __init__) before
        # this attribute access, or every call raises AttributeError.
        if self.enable_reflection:
            chain = await self._reflect_and_refine(chain, context)

        # Extract conclusion
        conclusion = await self._extract_conclusion(chain, context)

        # Update thought history
        self.thought_history.extend(chain)

        return {
            "success": True,
            "answer": conclusion["answer"],
            "confidence": conclusion["confidence"],
            "reasoning_chain": [self._thought_to_dict(t) for t in chain],
            "alternatives": conclusion["alternatives"],
            "evidence": conclusion["evidence"],
            "meta_insights": conclusion["meta_insights"]
        }
    except Exception as e:
        # All failures are converted into an error payload.
        logging.error(f"Error in chain of thought reasoning: {str(e)}")
        return {"success": False, "error": str(e)}
|
93 |
+
|
94 |
+
async def _initialize_chain(self, query: str, context: Dict[str, Any]) -> List[Thought]:
    """Initialize the thought chain with observations.

    Sends a structured prompt to the LLM client expected at
    ``context["groq_api"]`` and parses the '[O#]'-formatted response
    into Thought objects.

    NOTE(review): `_parse_observations` is not defined in the visible
    code — confirm it exists on this class.
    """
    prompt = f"""
    Initialize chain of thought for query:
    Query: {query}
    Context: {json.dumps(context)}

    Provide initial observations:
    1. Key elements in query
    2. Relevant context factors
    3. Initial hypotheses
    4. Potential approaches

    Format as:
    [O1] Element: ... | Relevance: ... | Confidence: ...
    [O2] Context: ... | Impact: ... | Confidence: ...
    [O3] Hypothesis: ... | Support: ... | Confidence: ...
    [O4] Approach: ... | Rationale: ... | Confidence: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_observations(response["answer"])
|
116 |
+
|
117 |
+
async def _generate_thoughts(self, query: str, context: Dict[str, Any]) -> List[Thought]:
    """Generate candidate thoughts for the chain.

    Prompts the LLM at ``context["groq_api"]`` for '[T#]'-formatted
    thought descriptions and parses them into Thought objects.
    """
    prompt = f"""
    Generate thoughts for query analysis:
    Query: {query}
    Context: {json.dumps(context)}

    For each thought provide:
    1. [Type]: {" | ".join([t.value for t in ThoughtType])}
    2. [Content]: Main thought
    3. [Evidence]: Supporting evidence
    4. [Alternatives]: Alternative perspectives
    5. [Next]: Potential next steps
    6. [Confidence]: 0-1 score

    Format as:
    [T1]
    Type: ...
    Content: ...
    Evidence: ...
    Alternatives: ...
    Next: ...
    Confidence: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_thoughts(response["answer"])
|
144 |
+
|
145 |
+
async def _build_chain(self, thoughts: List[Thought], context: Dict[str, Any]) -> List[Thought]:
    """Build a coherent chain from candidate thoughts.

    Serializes the candidates into the prompt and asks the LLM to order
    and justify them; the '[S#]' response is parsed back into a chain
    drawn from `thoughts`.
    """
    prompt = f"""
    Build coherent thought chain:
    Thoughts: {json.dumps([self._thought_to_dict(t) for t in thoughts])}
    Context: {json.dumps(context)}

    For each step specify:
    1. Selected thought
    2. Reasoning for selection
    3. Connection to previous
    4. Expected impact

    Format as:
    [S1]
    Thought: ...
    Reason: ...
    Connection: ...
    Impact: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_chain(response["answer"], thoughts)
|
168 |
+
|
169 |
+
async def _reflect_and_refine(self, chain: List[Thought], context: Dict[str, Any]) -> List[Thought]:
    """Reflect on and refine the thought chain.

    Asks the model to critique the current chain (gaps, weak assumptions,
    missing evidence, alternatives) and to suggest refinements, which are
    appended to the chain by ``_apply_refinements``.
    """
    prompt = f"""
    Reflect on thought chain:
    Chain: {json.dumps([self._thought_to_dict(t) for t in chain])}
    Context: {json.dumps(context)}

    Analyze for:
    1. Logical gaps
    2. Weak assumptions
    3. Missing evidence
    4. Alternative perspectives

    Suggest refinements:
    1. Additional thoughts
    2. Modified reasoning
    3. New connections
    4. Evidence needs

    Format as:
    [Analysis]
    Gaps: ...
    Assumptions: ...
    Missing: ...
    Alternatives: ...

    [Refinements]
    Thoughts: ...
    Reasoning: ...
    Connections: ...
    Evidence: ...
    """

    response = await context["groq_api"].predict(prompt)
    # Only the [Refinements] section's "Thoughts:" line is consumed; the
    # [Analysis] section is informational for the model itself.
    return self._apply_refinements(chain, response["answer"])
|
204 |
+
|
205 |
+
async def _extract_conclusion(self, chain: List[Thought], context: Dict[str, Any]) -> Dict[str, Any]:
    """Extract final conclusion from thought chain.

    Returns the dict produced by ``_parse_conclusion`` with keys:
    answer, confidence, evidence, alternatives, meta_insights,
    future_considerations.
    """
    prompt = f"""
    Extract conclusion from thought chain:
    Chain: {json.dumps([self._thought_to_dict(t) for t in chain])}
    Context: {json.dumps(context)}

    Provide:
    1. Main conclusion
    2. Confidence level
    3. Supporting evidence
    4. Alternative conclusions
    5. Meta-insights gained
    6. Future considerations

    Format as:
    [Conclusion]
    Answer: ...
    Confidence: ...
    Evidence: ...
    Alternatives: ...

    [Meta]
    Insights: ...
    Future: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_conclusion(response["answer"])
|
234 |
+
|
235 |
+
def _parse_observations(self, response: str) -> List["Thought"]:
    """Parse initial observations into thoughts.

    Expected line format::

        [O1] Key: content | Evidence: e1, e2 | Confidence: 0.8

    Fixes over the previous version:
    * a line whose first segment lacked a ':' raised an uncaught
      ValueError and aborted the whole parse -- malformed lines are now
      skipped;
    * the bare ``except:`` around the float conversion now catches only
      the expected ValueError/IndexError.
    """
    observations: List["Thought"] = []

    for line in response.split('\n'):
        if not line.startswith('[O'):
            continue
        parts = line.split('|')
        if len(parts) < 3:
            continue

        try:
            # "[O1] Key: content" -> key="Key", content="content"
            main_part = parts[0].split(']', 1)[1].strip()
            key, content = main_part.split(':', 1)
            evidence = [p.strip() for p in parts[1].split(':', 1)[1].strip().split(',')]
        except (IndexError, ValueError):
            # Malformed observation line -- skip it rather than abort parsing.
            continue

        try:
            confidence = float(parts[2].split(':', 1)[1].strip())
        except (IndexError, ValueError):
            confidence = 0.5  # default when the score is missing or garbled

        observations.append(Thought(
            type=ThoughtType.OBSERVATION,
            content=content.strip(),
            confidence=confidence,
            evidence=evidence,
            alternatives=[],
            next_steps=[],
            metadata={"key": key}
        ))

    return observations
|
265 |
+
|
266 |
+
def _parse_thoughts(self, response: str) -> List["Thought"]:
    """Parse generated [T*]-formatted thoughts from a model response.

    Each record starts with a ``[T<n>]`` marker followed by ``Type:``,
    ``Content:``, ``Evidence:``, ``Alternatives:``, ``Next:`` and
    ``Confidence:`` lines. Records with an unknown type are logged and
    skipped.

    Fix: the confidence conversion previously used a bare ``except:``,
    which could mask unrelated errors; it now catches only ValueError.
    """
    thoughts: List["Thought"] = []
    current = None

    for line in response.split('\n'):
        line = line.strip()
        if not line:
            continue

        if line.startswith('[T'):
            # New record marker: flush the thought built so far.
            if current:
                thoughts.append(current)
            current = None
        elif line.startswith('Type:'):
            type_str = line[5:].strip()
            try:
                thought_type = ThoughtType(type_str.lower())
                current = Thought(
                    type=thought_type,
                    content="",
                    confidence=0.0,
                    evidence=[],
                    alternatives=[],
                    next_steps=[],
                    metadata={}
                )
            except ValueError:
                # Unknown enum value: skip this record's remaining fields.
                logging.warning(f"Invalid thought type: {type_str}")
        elif current:
            if line.startswith('Content:'):
                current.content = line[8:].strip()
            elif line.startswith('Evidence:'):
                current.evidence = [e.strip() for e in line[9:].split(',')]
            elif line.startswith('Alternatives:'):
                current.alternatives = [a.strip() for a in line[13:].split(',')]
            elif line.startswith('Next:'):
                current.next_steps = [n.strip() for n in line[5:].split(',')]
            elif line.startswith('Confidence:'):
                try:
                    current.confidence = float(line[11:].strip())
                except ValueError:
                    current.confidence = 0.5

    # Flush the final record (no trailing [T*] marker follows it).
    if current:
        thoughts.append(current)

    return thoughts
|
314 |
+
|
315 |
+
def _parse_chain(self, response: str, thoughts: List[Thought]) -> List[Thought]:
|
316 |
+
"""Parse and order thoughts into a chain."""
|
317 |
+
chain = []
|
318 |
+
thought_map = {self._thought_to_dict(t)["content"]: t for t in thoughts}
|
319 |
+
|
320 |
+
for line in response.split('\n'):
|
321 |
+
if line.startswith('Thought:'):
|
322 |
+
content = line[8:].strip()
|
323 |
+
if content in thought_map:
|
324 |
+
chain.append(thought_map[content])
|
325 |
+
|
326 |
+
return chain
|
327 |
+
|
328 |
+
def _apply_refinements(self, chain: List[Thought], response: str) -> List[Thought]:
|
329 |
+
"""Apply refinements to thought chain."""
|
330 |
+
refined_chain = chain.copy()
|
331 |
+
|
332 |
+
# Parse refinements
|
333 |
+
sections = response.split('[')
|
334 |
+
for section in sections:
|
335 |
+
if section.startswith('Refinements]'):
|
336 |
+
lines = section.split('\n')[1:]
|
337 |
+
for line in lines:
|
338 |
+
if line.startswith('Thoughts:'):
|
339 |
+
new_thoughts = self._parse_refinement_thoughts(line[9:])
|
340 |
+
refined_chain.extend(new_thoughts)
|
341 |
+
|
342 |
+
return refined_chain
|
343 |
+
|
344 |
+
def _parse_refinement_thoughts(self, refinements: str) -> List["Thought"]:
    """Turn a ';'-separated refinement string into REFINEMENT thoughts.

    Empty fragments (e.g. trailing ';') are ignored.
    """
    parsed = []
    for chunk in refinements.split(';'):
        text = chunk.strip()
        if not text:
            continue
        parsed.append(Thought(
            type=ThoughtType.REFINEMENT,
            content=text,
            confidence=0.8,  # refinements are treated as high-confidence
            evidence=[],
            alternatives=[],
            next_steps=[],
            metadata={"refined": True}
        ))
    return parsed
|
359 |
+
|
360 |
+
def _parse_conclusion(self, response: str) -> Dict[str, Any]:
|
361 |
+
"""Parse final conclusion."""
|
362 |
+
conclusion = {
|
363 |
+
"answer": "",
|
364 |
+
"confidence": 0.0,
|
365 |
+
"evidence": [],
|
366 |
+
"alternatives": [],
|
367 |
+
"meta_insights": [],
|
368 |
+
"future_considerations": []
|
369 |
+
}
|
370 |
+
|
371 |
+
sections = response.split('[')
|
372 |
+
for section in sections:
|
373 |
+
if section.startswith('Conclusion]'):
|
374 |
+
lines = section.split('\n')[1:]
|
375 |
+
for line in lines:
|
376 |
+
if line.startswith('Answer:'):
|
377 |
+
conclusion["answer"] = line[7:].strip()
|
378 |
+
elif line.startswith('Confidence:'):
|
379 |
+
try:
|
380 |
+
conclusion["confidence"] = float(line[11:].strip())
|
381 |
+
except:
|
382 |
+
conclusion["confidence"] = 0.5
|
383 |
+
elif line.startswith('Evidence:'):
|
384 |
+
conclusion["evidence"] = [e.strip() for e in line[9:].split(',')]
|
385 |
+
elif line.startswith('Alternatives:'):
|
386 |
+
conclusion["alternatives"] = [a.strip() for a in line[13:].split(',')]
|
387 |
+
elif section.startswith('Meta]'):
|
388 |
+
lines = section.split('\n')[1:]
|
389 |
+
for line in lines:
|
390 |
+
if line.startswith('Insights:'):
|
391 |
+
conclusion["meta_insights"] = [i.strip() for i in line[9:].split(',')]
|
392 |
+
elif line.startswith('Future:'):
|
393 |
+
conclusion["future_considerations"] = [f.strip() for f in line[7:].split(',')]
|
394 |
+
|
395 |
+
return conclusion
|
396 |
+
|
397 |
+
def _thought_to_dict(self, thought: Thought) -> Dict[str, Any]:
|
398 |
+
"""Convert thought to dictionary for serialization."""
|
399 |
+
return {
|
400 |
+
"type": thought.type.value,
|
401 |
+
"content": thought.content,
|
402 |
+
"confidence": thought.confidence,
|
403 |
+
"evidence": thought.evidence,
|
404 |
+
"alternatives": thought.alternatives,
|
405 |
+
"next_steps": thought.next_steps,
|
406 |
+
"metadata": thought.metadata
|
407 |
+
}
|
408 |
+
|
409 |
+
def get_thought_history(self) -> List[Dict[str, Any]]:
    """Return serialized copies of every thought processed so far."""
    history = []
    for recorded in self.thought_history:
        history.append(self._thought_to_dict(recorded))
    return history
|
412 |
+
|
413 |
+
def clear_history(self) -> None:
    """Drop all recorded thoughts."""
    # Rebinds to a fresh list rather than clearing in place, matching the
    # original behavior (external aliases keep the old list).
    self.thought_history = []
|
reasoning/coordination.py
ADDED
@@ -0,0 +1,525 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced strategy coordination patterns for the unified reasoning engine."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Callable
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import asyncio
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
from .unified_engine import StrategyType, StrategyResult, UnifiedResult
|
14 |
+
|
15 |
+
class CoordinationPattern(Enum):
    """Types of strategy coordination patterns.

    Each member corresponds to one ``_coordinate_*`` method on
    StrategyCoordinator.
    """
    PIPELINE = "pipeline"          # sequential: each strategy feeds the next
    PARALLEL = "parallel"          # concurrent execution, results synthesized
    HIERARCHICAL = "hierarchical"  # strategies arranged in dependency levels
    FEEDBACK = "feedback"          # iterative loops driven by generated feedback
    ADAPTIVE = "adaptive"          # next strategy chosen from prior results
    ENSEMBLE = "ensemble"          # all run, outputs combined ensemble-style
|
23 |
+
|
24 |
+
class CoordinationPhase(Enum):
    """Phases in strategy coordination (tracked in CoordinationState.phase)."""
    INITIALIZATION = "initialization"    # setting up state and strategies
    EXECUTION = "execution"              # strategies running
    SYNCHRONIZATION = "synchronization"  # aligning shared context/results
    ADAPTATION = "adaptation"            # adjusting coordination mid-run
    COMPLETION = "completion"            # finished, results available
|
31 |
+
|
32 |
+
@dataclass
class CoordinationState:
    """State of strategy coordination for a single run."""
    pattern: CoordinationPattern                  # coordination pattern in effect
    active_strategies: Dict[StrategyType, bool]   # which strategies participate
    phase: CoordinationPhase                      # current coordination phase
    shared_context: Dict[str, Any]                # context shared across strategies
    synchronization_points: List[str]             # labels of declared sync points
    adaptation_history: List[Dict[str, Any]]      # record of adaptations applied
    metadata: Dict[str, Any] = field(default_factory=dict)  # free-form extras
|
42 |
+
|
43 |
+
@dataclass
class StrategyInteraction:
    """A recorded interaction between two strategies."""
    source: StrategyType          # strategy that produced the interaction
    # NOTE(review): _record_interaction passes target=None at the end of a
    # pipeline, so this is effectively Optional[StrategyType] -- confirm and
    # update the annotation upstream.
    target: StrategyType          # receiving strategy (None at pipeline end)
    interaction_type: str         # e.g. "pipeline_transfer"
    data: Dict[str, Any]          # payload (typically the raw result dict)
    timestamp: datetime = field(default_factory=datetime.now)  # creation time
|
51 |
+
|
52 |
+
class StrategyCoordinator:
    """
    Advanced strategy coordinator that:
    1. Manages strategy interactions
    2. Implements coordination patterns
    3. Handles state synchronization
    4. Adapts coordination dynamically
    5. Optimizes strategy combinations
    """

    def __init__(self,
                 strategies: Dict[StrategyType, ReasoningStrategy],
                 learning_rate: float = 0.1):
        """Initialize the coordinator.

        Args:
            strategies: Mapping from strategy type to strategy instance.
            learning_rate: EMA factor used when updating pattern weights
                in ``_update_pattern_performance`` (0..1; higher reacts faster).
        """
        self.strategies = strategies
        self.learning_rate = learning_rate

        # Coordination state: per-run states and the interaction log.
        self.states: Dict[str, CoordinationState] = {}
        self.interactions: List[StrategyInteraction] = []

        # Pattern performance: raw success-rate history plus the smoothed
        # weight used when selecting a pattern (all start at 1.0, i.e. no
        # prior preference).
        self.pattern_performance: Dict[CoordinationPattern, List[float]] = defaultdict(list)
        self.pattern_weights: Dict[CoordinationPattern, float] = {
            pattern: 1.0 for pattern in CoordinationPattern
        }
|
77 |
+
|
78 |
+
    async def coordinate(self,
                        query: str,
                        context: Dict[str, Any],
                        pattern: Optional[CoordinationPattern] = None) -> Dict[str, Any]:
        """Coordinate strategy execution using the specified pattern.

        Args:
            query: The user query to reason about.
            context: Shared context; must contain a "groq_api" client when
                pattern selection is delegated to the model.
            pattern: Optional explicit pattern; selected automatically when
                omitted.

        Returns:
            The pattern-specific result dict, or
            ``{"success": False, "error": ..., "pattern": ...}`` on failure.
        """
        try:
            # Select pattern if not specified
            if not pattern:
                pattern = await self._select_pattern(query, context)

            # Initialize coordination
            # NOTE(review): _initialize_coordination is not defined in this
            # chunk -- confirm it exists elsewhere in the class.
            state = await self._initialize_coordination(pattern, context)

            # Dispatch to the matching _coordinate_* implementation.
            if pattern == CoordinationPattern.PIPELINE:
                result = await self._coordinate_pipeline(query, context, state)
            elif pattern == CoordinationPattern.PARALLEL:
                result = await self._coordinate_parallel(query, context, state)
            elif pattern == CoordinationPattern.HIERARCHICAL:
                result = await self._coordinate_hierarchical(query, context, state)
            elif pattern == CoordinationPattern.FEEDBACK:
                result = await self._coordinate_feedback(query, context, state)
            elif pattern == CoordinationPattern.ADAPTIVE:
                result = await self._coordinate_adaptive(query, context, state)
            elif pattern == CoordinationPattern.ENSEMBLE:
                result = await self._coordinate_ensemble(query, context, state)
            else:
                raise ValueError(f"Unsupported coordination pattern: {pattern}")

            # Feed the outcome back into the pattern-weight EMA.
            self._update_pattern_performance(pattern, result)

            return result

        except Exception as e:
            logging.error(f"Error in strategy coordination: {str(e)}")
            return {
                "success": False,
                "error": str(e),
                "pattern": pattern.value if pattern else None
            }
|
119 |
+
|
120 |
+
    async def _select_pattern(self, query: str, context: Dict[str, Any]) -> CoordinationPattern:
        """Select an appropriate coordination pattern.

        Asks the Groq model for a pattern recommendation, then weights that
        recommendation by the learned per-pattern weights and returns the
        argmax.
        NOTE(review): _parse_pattern_selection is not defined in this chunk;
        it is assumed to return a pattern-name -> score mapping -- confirm.
        """
        prompt = f"""
        Select coordination pattern:
        Query: {query}
        Context: {json.dumps(context)}

        Consider:
        1. Task complexity and type
        2. Strategy dependencies
        3. Resource constraints
        4. Performance history
        5. Adaptation needs

        Format as:
        [Selection]
        Pattern: ...
        Rationale: ...
        Confidence: ...
        """

        response = await context["groq_api"].predict(prompt)
        selection = self._parse_pattern_selection(response["answer"])

        # Combine the model's scores with historical performance weights.
        weighted_patterns = {
            pattern: self.pattern_weights[pattern] * selection.get(pattern.value, 0.0)
            for pattern in CoordinationPattern
        }

        return max(weighted_patterns.items(), key=lambda x: x[1])[0]
|
151 |
+
|
152 |
+
    async def _coordinate_pipeline(self,
                                  query: str,
                                  context: Dict[str, Any],
                                  state: CoordinationState) -> Dict[str, Any]:
        """Coordinate strategies in a sequential pipeline.

        Each strategy's raw result is placed into the context seen by the
        next one ("previous_result", "pipeline_position").
        NOTE(review): _determine_pipeline_order is not defined in this chunk
        -- confirm it exists elsewhere in the class.
        """
        results = []
        current_context = context.copy()

        # Determine optimal order
        strategy_order = await self._determine_pipeline_order(query, context)

        for strategy_type in strategy_order:
            try:
                # Execute strategy
                strategy = self.strategies[strategy_type]
                result = await strategy.reason(query, current_context)

                # Update context so the next stage can build on this result.
                current_context.update({
                    "previous_result": result,
                    "pipeline_position": len(results)
                })

                results.append(StrategyResult(
                    strategy_type=strategy_type,
                    success=result.get("success", False),
                    answer=result.get("answer"),
                    confidence=result.get("confidence", 0.0),
                    reasoning_trace=result.get("reasoning_trace", []),
                    metadata=result.get("metadata", {}),
                    performance_metrics=result.get("performance_metrics", {})
                ))

                # Record the hand-off to the next stage (target=None at end).
                self._record_interaction(
                    source=strategy_type,
                    target=strategy_order[len(results)] if len(results) < len(strategy_order) else None,
                    interaction_type="pipeline_transfer",
                    data={"result": result}
                )

            except Exception as e:
                # A failing stage is logged and skipped; the pipeline continues
                # with the remaining strategies.
                logging.error(f"Error in pipeline strategy {strategy_type}: {str(e)}")

        return {
            "success": any(r.success for r in results),
            "results": results,
            "pattern": CoordinationPattern.PIPELINE.value,
            "metrics": {
                "total_steps": len(results),
                # Guarded against empty results to avoid ZeroDivisionError.
                "success_rate": sum(1 for r in results if r.success) / len(results) if results else 0
            }
        }
|
205 |
+
|
206 |
+
    async def _coordinate_parallel(self,
                                  query: str,
                                  context: Dict[str, Any],
                                  state: CoordinationState) -> Dict[str, Any]:
        """Coordinate strategies concurrently and synthesize their results.

        NOTE(review): _synthesize_parallel_results is not defined in this
        chunk -- confirm it exists elsewhere in the class.
        """
        async def execute_strategy(strategy_type: StrategyType) -> StrategyResult:
            """Run one strategy; errors become a failed StrategyResult."""
            try:
                strategy = self.strategies[strategy_type]
                result = await strategy.reason(query, context)

                return StrategyResult(
                    strategy_type=strategy_type,
                    success=result.get("success", False),
                    answer=result.get("answer"),
                    confidence=result.get("confidence", 0.0),
                    reasoning_trace=result.get("reasoning_trace", []),
                    metadata=result.get("metadata", {}),
                    performance_metrics=result.get("performance_metrics", {})
                )
            except Exception as e:
                logging.error(f"Error in parallel strategy {strategy_type}: {str(e)}")
                return StrategyResult(
                    strategy_type=strategy_type,
                    success=False,
                    answer=None,
                    confidence=0.0,
                    reasoning_trace=[{"error": str(e)}],
                    metadata={},
                    performance_metrics={}
                )

        # Launch every active strategy concurrently.
        tasks = [execute_strategy(strategy_type)
                for strategy_type in state.active_strategies
                if state.active_strategies[strategy_type]]

        results = await asyncio.gather(*tasks)

        # Synthesize results into a single combined answer.
        synthesis = await self._synthesize_parallel_results(results, context)

        return {
            "success": synthesis.get("success", False),
            "results": results,
            "synthesis": synthesis,
            "pattern": CoordinationPattern.PARALLEL.value,
            "metrics": {
                "total_strategies": len(results),
                "success_rate": sum(1 for r in results if r.success) / len(results) if results else 0
            }
        }
|
257 |
+
|
258 |
+
    async def _coordinate_hierarchical(self,
                                     query: str,
                                     context: Dict[str, Any],
                                     state: CoordinationState) -> Dict[str, Any]:
        """Coordinate strategies level by level in a hierarchy.

        Strategies within a level run concurrently; each level's results are
        injected into the context of the next level.
        NOTE(review): _build_strategy_hierarchy is not defined in this chunk
        -- it is assumed to return a list of levels, each a list of
        StrategyType -- confirm.
        """
        # Build strategy hierarchy
        hierarchy = await self._build_strategy_hierarchy(query, context)
        results = {}

        async def execute_level(level_strategies: List[StrategyType],
                              level_context: Dict[str, Any]) -> List[StrategyResult]:
            """Run all active strategies of one level concurrently."""
            tasks = []
            for strategy_type in level_strategies:
                if strategy_type in state.active_strategies and state.active_strategies[strategy_type]:
                    strategy = self.strategies[strategy_type]
                    tasks.append(strategy.reason(query, level_context))

            level_results = await asyncio.gather(*tasks)
            # NOTE(review): zip pairs level_strategies with level_results, but
            # inactive strategies were filtered out of tasks -- if any strategy
            # of the level is inactive the pairing is misaligned; confirm.
            return [
                StrategyResult(
                    strategy_type=strategy_type,
                    success=result.get("success", False),
                    answer=result.get("answer"),
                    confidence=result.get("confidence", 0.0),
                    reasoning_trace=result.get("reasoning_trace", []),
                    metadata=result.get("metadata", {}),
                    performance_metrics=result.get("performance_metrics", {})
                )
                for strategy_type, result in zip(level_strategies, level_results)
            ]

        # Execute hierarchy levels top-down.
        current_context = context.copy()
        for level, level_strategies in enumerate(hierarchy):
            results[level] = await execute_level(level_strategies, current_context)

            # Expose this level's results to the next level.
            current_context.update({
                "previous_level_results": results[level],
                "hierarchy_level": level
            })

        return {
            "success": any(any(r.success for r in level_results)
                         for level_results in results.values()),
            "results": results,
            "hierarchy": hierarchy,
            "pattern": CoordinationPattern.HIERARCHICAL.value,
            "metrics": {
                "total_levels": len(hierarchy),
                "level_success_rates": {
                    level: sum(1 for r in results[level] if r.success) / len(results[level])
                    for level in results if results[level]
                }
            }
        }
|
314 |
+
|
315 |
+
    async def _coordinate_feedback(self,
                                 query: str,
                                 context: Dict[str, Any],
                                 state: CoordinationState) -> Dict[str, Any]:
        """Coordinate strategies with iterative feedback loops.

        Runs all active strategies, generates feedback on their combined
        output, and repeats with the feedback in context until the
        termination condition fires or max_iterations is reached.
        NOTE(review): _generate_feedback, _should_terminate_feedback and
        _calculate_feedback_impact are not defined in this chunk -- confirm
        they exist elsewhere in the class.
        """
        results = []
        feedback_history = []
        current_context = context.copy()

        max_iterations = 5  # Prevent infinite loops
        iteration = 0

        while iteration < max_iterations:
            iteration += 1

            # Execute all active strategies sequentially for this iteration.
            iteration_results = []
            for strategy_type in state.active_strategies:
                if state.active_strategies[strategy_type]:
                    try:
                        strategy = self.strategies[strategy_type]
                        result = await strategy.reason(query, current_context)

                        strategy_result = StrategyResult(
                            strategy_type=strategy_type,
                            success=result.get("success", False),
                            answer=result.get("answer"),
                            confidence=result.get("confidence", 0.0),
                            reasoning_trace=result.get("reasoning_trace", []),
                            metadata=result.get("metadata", {}),
                            performance_metrics=result.get("performance_metrics", {})
                        )

                        iteration_results.append(strategy_result)

                    except Exception as e:
                        # Failed strategies are skipped for this iteration.
                        logging.error(f"Error in feedback strategy {strategy_type}: {str(e)}")

            results.append(iteration_results)

            # Generate feedback on this iteration's combined output.
            feedback = await self._generate_feedback(iteration_results, current_context)
            feedback_history.append(feedback)

            # Check termination condition
            if self._should_terminate_feedback(feedback, iteration_results):
                break

            # Make the feedback visible to the next iteration.
            current_context.update({
                "previous_results": iteration_results,
                "feedback": feedback,
                "iteration": iteration
            })

        return {
            "success": any(any(r.success for r in iteration_results)
                         for iteration_results in results),
            "results": results,
            "feedback_history": feedback_history,
            "pattern": CoordinationPattern.FEEDBACK.value,
            "metrics": {
                "total_iterations": iteration,
                "feedback_impact": self._calculate_feedback_impact(results, feedback_history)
            }
        }
|
381 |
+
|
382 |
+
    async def _coordinate_adaptive(self,
                                 query: str,
                                 context: Dict[str, Any],
                                 state: CoordinationState) -> Dict[str, Any]:
        """Coordinate strategies with adaptive (result-driven) selection.

        Repeatedly picks the next strategy based on results so far, executes
        it, and records an adaptation step, until every active strategy has
        produced a result or no further strategy is selected.
        NOTE(review): _select_next_strategy, _adapt_strategy_selection and
        _calculate_adaptation_impact are not defined in this chunk --
        confirm they exist. Also: if a strategy raises, nothing is appended
        to results, so the loop may re-select it indefinitely -- verify the
        selector excludes failed strategies.
        """
        results = []
        adaptations = []
        current_context = context.copy()

        while len(results) < len(state.active_strategies):
            # Select next strategy based on everything seen so far.
            next_strategy = await self._select_next_strategy(
                results, state.active_strategies, current_context)

            if not next_strategy:
                break  # selector declined to continue

            try:
                # Execute strategy
                strategy = self.strategies[next_strategy]
                result = await strategy.reason(query, current_context)

                strategy_result = StrategyResult(
                    strategy_type=next_strategy,
                    success=result.get("success", False),
                    answer=result.get("answer"),
                    confidence=result.get("confidence", 0.0),
                    reasoning_trace=result.get("reasoning_trace", []),
                    metadata=result.get("metadata", {}),
                    performance_metrics=result.get("performance_metrics", {})
                )

                results.append(strategy_result)

                # Adapt strategy selection for subsequent picks.
                adaptation = await self._adapt_strategy_selection(
                    strategy_result, current_context)
                adaptations.append(adaptation)

                # Update context
                current_context.update({
                    "previous_results": results,
                    "adaptations": adaptations,
                    "current_strategy": next_strategy
                })

            except Exception as e:
                logging.error(f"Error in adaptive strategy {next_strategy}: {str(e)}")

        return {
            "success": any(r.success for r in results),
            "results": results,
            "adaptations": adaptations,
            "pattern": CoordinationPattern.ADAPTIVE.value,
            "metrics": {
                "total_strategies": len(results),
                "adaptation_impact": self._calculate_adaptation_impact(results, adaptations)
            }
        }
|
441 |
+
|
442 |
+
    async def _coordinate_ensemble(self,
                                 query: str,
                                 context: Dict[str, Any],
                                 state: CoordinationState) -> Dict[str, Any]:
        """Coordinate strategies as an ensemble.

        Runs every active strategy sequentially on the same context and
        combines their outputs with ``_combine_ensemble_results``.
        NOTE(review): _combine_ensemble_results is not defined in this
        chunk -- confirm it exists elsewhere in the class.
        """
        # Execute all strategies
        results = []
        for strategy_type in state.active_strategies:
            if state.active_strategies[strategy_type]:
                try:
                    strategy = self.strategies[strategy_type]
                    result = await strategy.reason(query, context)

                    strategy_result = StrategyResult(
                        strategy_type=strategy_type,
                        success=result.get("success", False),
                        answer=result.get("answer"),
                        confidence=result.get("confidence", 0.0),
                        reasoning_trace=result.get("reasoning_trace", []),
                        metadata=result.get("metadata", {}),
                        performance_metrics=result.get("performance_metrics", {})
                    )

                    results.append(strategy_result)

                except Exception as e:
                    # A failing member is simply excluded from the ensemble.
                    logging.error(f"Error in ensemble strategy {strategy_type}: {str(e)}")

        # Combine results using ensemble methods
        ensemble_result = await self._combine_ensemble_results(results, context)

        return {
            "success": ensemble_result.get("success", False),
            "results": results,
            "ensemble_result": ensemble_result,
            "pattern": CoordinationPattern.ENSEMBLE.value,
            "metrics": {
                "total_members": len(results),
                "ensemble_confidence": ensemble_result.get("confidence", 0.0)
            }
        }
|
483 |
+
|
484 |
+
    def _record_interaction(self,
                          source: StrategyType,
                          target: Optional[StrategyType],
                          interaction_type: str,
                          data: Dict[str, Any]) -> None:
        """Append one strategy interaction to the in-memory log.

        Args:
            source: Strategy that produced the interaction.
            target: Receiving strategy; None marks the end of a pipeline.
            interaction_type: Label such as "pipeline_transfer".
            data: Payload stored with the interaction (e.g. the raw result).
        """
        # The log grows unbounded for the coordinator's lifetime; timestamps
        # are filled in by the StrategyInteraction default factory.
        self.interactions.append(StrategyInteraction(
            source=source,
            target=target,
            interaction_type=interaction_type,
            data=data
        ))
|
496 |
+
|
497 |
+
def _update_pattern_performance(self, pattern: CoordinationPattern, result: Dict[str, Any]):
|
498 |
+
"""Update pattern performance metrics."""
|
499 |
+
success_rate = result["metrics"].get("success_rate", 0.0)
|
500 |
+
self.pattern_performance[pattern].append(success_rate)
|
501 |
+
|
502 |
+
# Update weights using exponential moving average
|
503 |
+
current_weight = self.pattern_weights[pattern]
|
504 |
+
self.pattern_weights[pattern] = (
|
505 |
+
(1 - self.learning_rate) * current_weight +
|
506 |
+
self.learning_rate * success_rate
|
507 |
+
)
|
508 |
+
|
509 |
+
def get_performance_metrics(self) -> Dict[str, Any]:
|
510 |
+
"""Get comprehensive performance metrics."""
|
511 |
+
return {
|
512 |
+
"pattern_weights": dict(self.pattern_weights),
|
513 |
+
"average_performance": {
|
514 |
+
pattern.value: sum(scores) / len(scores) if scores else 0
|
515 |
+
for pattern, scores in self.pattern_performance.items()
|
516 |
+
},
|
517 |
+
"interaction_counts": defaultdict(int, {
|
518 |
+
interaction.interaction_type: 1
|
519 |
+
for interaction in self.interactions
|
520 |
+
}),
|
521 |
+
"active_patterns": [
|
522 |
+
pattern.value for pattern, weight in self.pattern_weights.items()
|
523 |
+
if weight > 0.5
|
524 |
+
]
|
525 |
+
}
|
reasoning/emergent.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Emergent Reasoning Module
|
3 |
+
------------------------
|
4 |
+
Implements emergent reasoning capabilities that arise from the interaction
|
5 |
+
of multiple reasoning strategies and patterns.
|
6 |
+
"""
|
7 |
+
|
8 |
+
from typing import Dict, Any, List, Optional
|
9 |
+
from .base import ReasoningStrategy
|
10 |
+
from .meta_learning import MetaLearningStrategy
|
11 |
+
from .chain_of_thought import ChainOfThoughtStrategy
|
12 |
+
from .tree_of_thoughts import TreeOfThoughtsStrategy
|
13 |
+
|
14 |
+
class EmergentReasoning(ReasoningStrategy):
    """
    Combines several component reasoning strategies and surfaces the
    patterns that emerge from their interaction.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Set up the component strategies and their combination weights."""
        super().__init__()
        self.config = config or {}

        # Core tuning knobs shared by every sub-strategy.
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Every component strategy receives the same shared configuration.
        shared_cfg = {
            'min_confidence': self.min_confidence,
            'parallel_threshold': self.parallel_threshold,
            'learning_rate': self.learning_rate,
            'strategy_weights': self.strategy_weights
        }

        self.meta_learner = MetaLearningStrategy(shared_cfg)
        self.chain_of_thought = ChainOfThoughtStrategy(shared_cfg)
        self.tree_of_thoughts = TreeOfThoughtsStrategy(shared_cfg)

        # Relative importance of each component when merging results.
        self.weights = self.config.get('combination_weights', {
            'meta': 0.4,
            'chain': 0.3,
            'tree': 0.3
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run every component strategy on the query and merge their outputs,
        reporting any patterns that emerge from their interaction.

        Args:
            query: The input query to reason about
            context: Additional context and parameters

        Returns:
            Dict containing reasoning results and confidence scores
        """
        try:
            # Run the components sequentially (order preserved on purpose).
            meta_result = await self.meta_learner.reason(query, context)
            chain_result = await self.chain_of_thought.reason(query, context)
            tree_result = await self.tree_of_thoughts.reason(query, context)

            named_results = {
                'meta': meta_result,
                'chain': chain_result,
                'tree': tree_result,
            }

            # Merge answers according to the configured weights.
            combined_answer = self._combine_results([
                (res.get('answer', ''), self.weights[name])
                for name, res in named_results.items()
            ])

            # Overall confidence is the weighted sum of component confidences.
            confidence = sum(
                res.get('confidence', 0) * self.weights[name]
                for name, res in named_results.items()
            )

            return {
                'answer': combined_answer,
                'confidence': confidence,
                'reasoning_path': {
                    name: res.get('reasoning_path')
                    for name, res in named_results.items()
                },
                'emergent_patterns': self._identify_patterns(
                    [meta_result, chain_result, tree_result]
                )
            }

        except Exception as e:
            return {
                'error': f"Emergent reasoning failed: {str(e)}",
                'confidence': 0.0
            }

    def _combine_results(self, weighted_results: List[tuple[str, float]]) -> str:
        """Merge weighted answers; currently returns the highest-weighted one."""
        if not weighted_results:
            return ""

        best_answer, _ = max(weighted_results, key=lambda pair: pair[1])
        return best_answer

    def _identify_patterns(self, results: List[Dict[str, Any]]) -> List[str]:
        """Derive cross-strategy observations from the individual results."""
        patterns: List[str] = []

        # Agreement between the strategies' answers.
        answers = [r.get('answer', '') for r in results if r.get('answer')]
        unique_answers = set(answers)
        if len(unique_answers) == 1:
            patterns.append("All strategies reached the same conclusion")
        elif len(unique_answers) < len(answers):
            patterns.append("Some strategies converged on similar conclusions")

        # Shared confidence level across the strategies.
        confidences = [r.get('confidence', 0) for r in results]
        mean_confidence = sum(confidences) / len(confidences) if confidences else 0
        if mean_confidence > 0.8:
            patterns.append("High confidence across all strategies")
        elif mean_confidence < 0.3:
            patterns.append("Low confidence across strategies")

        return patterns
|
reasoning/groq_strategy.py
ADDED
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Groq API integration with streaming and optimizations."""
|
2 |
+
|
3 |
+
import os
|
4 |
+
import logging
|
5 |
+
import asyncio
|
6 |
+
from typing import Dict, Any, Optional, List, AsyncGenerator, Union
|
7 |
+
import groq
|
8 |
+
from datetime import datetime
|
9 |
+
import json
|
10 |
+
from dataclasses import dataclass
|
11 |
+
from concurrent.futures import ThreadPoolExecutor
|
12 |
+
|
13 |
+
from .base import ReasoningStrategy, StrategyResult
|
14 |
+
|
15 |
+
logger = logging.getLogger(__name__)
|
16 |
+
|
17 |
+
@dataclass
class GroqConfig:
    """Configuration for Groq models."""
    # --- Required generation settings ---
    model_name: str      # Groq model identifier, e.g. "mixtral-8x7b-32768".
    max_tokens: int      # Maximum tokens to generate per completion.
    temperature: float   # Sampling temperature (higher = more random).
    top_p: float         # Nucleus-sampling probability mass.
    # --- Optional sampling / penalty knobs ---
    top_k: Optional[int] = None       # Top-k sampling cutoff; None disables it.
    presence_penalty: float = 0.0     # Penalty for tokens already present.
    frequency_penalty: float = 0.0    # Penalty scaled by token frequency.
    stop_sequences: Optional[List[str]] = None  # NOTE(review): never forwarded to the API in this module -- confirm intent.
    # --- Streaming / retry behaviour ---
    chunk_size: int = 1024     # Streaming chunk size hint (not passed to the API here).
    retry_attempts: int = 3    # Max API attempts (used by GroqStrategy.reason).
    retry_delay: float = 1.0   # Base delay in seconds for exponential backoff.
31 |
+
|
32 |
+
class GroqStrategy(ReasoningStrategy):
    """Enhanced reasoning strategy using Groq's API with streaming and optimizations.

    Provides:
      * ``reason``        -- non-streaming completion with caching and retries.
      * ``reason_stream`` -- streaming completion yielding content chunks.

    Successful responses are cached in memory for ``cache_ttl`` seconds,
    keyed by (query, context, model).
    """

    def __init__(self, api_key: Optional[str] = None):
        """Initialize Groq strategy.

        Args:
            api_key: Explicit API key; falls back to the GROQ_API_KEY env var.

        Raises:
            ValueError: If no API key could be resolved.
        """
        super().__init__()
        self.api_key = api_key or os.getenv("GROQ_API_KEY")
        if not self.api_key:
            raise ValueError("GROQ_API_KEY must be set")

        # Initialize Groq client with optimized settings.
        # NOTE(review): the methods below *await* this client's calls, but
        # ``groq.Groq`` is the synchronous client -- confirm whether
        # ``groq.AsyncGroq`` was intended.
        self.client = groq.Groq(
            api_key=self.api_key,
            timeout=30,
            max_retries=3
        )

        # Per-model generation settings, keyed by short alias.
        self.model_configs = {
            "mixtral": GroqConfig(
                model_name="mixtral-8x7b-32768",
                max_tokens=32768,
                temperature=0.7,
                top_p=0.9,
                top_k=40,
                presence_penalty=0.1,
                frequency_penalty=0.1,
                chunk_size=4096
            ),
            "llama": GroqConfig(
                model_name="llama2-70b-4096",
                max_tokens=4096,
                temperature=0.8,
                top_p=0.9,
                top_k=50,
                presence_penalty=0.2,
                frequency_penalty=0.2,
                chunk_size=1024
            )
        }

        # Thread pool for parallel post-processing work.
        self.executor = ThreadPoolExecutor(max_workers=4)

        # Response cache: key -> {"content": str, "timestamp": datetime}.
        # Expired entries are evicted lazily in _get_from_cache.
        self.cache: Dict[str, Any] = {}
        self.cache_ttl = 3600  # seconds (1 hour)

    async def reason_stream(
        self,
        query: str,
        context: Dict[str, Any],
        model: str = "mixtral",
        chunk_handler: Optional[callable] = None
    ) -> AsyncGenerator[str, None]:
        """
        Stream reasoning results from Groq's API.

        Args:
            query: The query to reason about
            context: Additional context
            model: Model to use ('mixtral' or 'llama')
            chunk_handler: Optional async callback invoked with each chunk

        Yields:
            Content chunks as they arrive; on failure, a single
            "Error: ..." string.
        """
        config = self.model_configs[model]
        messages = self._prepare_messages(query, context)

        try:
            # NOTE(review): confirm the Groq chat API accepts ``top_k``;
            # OpenAI-compatible endpoints usually do not.
            stream = await self.client.chat.completions.create(
                model=config.model_name,
                messages=messages,
                temperature=config.temperature,
                top_p=config.top_p,
                top_k=config.top_k,
                presence_penalty=config.presence_penalty,
                frequency_penalty=config.frequency_penalty,
                max_tokens=config.max_tokens,
                stream=True
            )

            collected_content = []
            async for chunk in stream:
                if chunk.choices[0].delta.content:
                    content = chunk.choices[0].delta.content
                    collected_content.append(content)

                    if chunk_handler:
                        await chunk_handler(content)

                    yield content

            # Cache the complete response so later non-streaming calls can
            # be served without another API round-trip.
            cache_key = self._generate_cache_key(query, context, model)
            self.cache[cache_key] = {
                "content": "".join(collected_content),
                "timestamp": datetime.now()
            }

        except Exception as e:
            logger.error(f"Groq streaming error: {str(e)}")
            yield f"Error: {str(e)}"

    async def reason(
        self,
        query: str,
        context: Dict[str, Any],
        model: str = "mixtral"
    ) -> StrategyResult:
        """
        Enhanced reasoning with Groq's API including caching and retries.

        Args:
            query: The query to reason about
            context: Additional context
            model: Model to use ('mixtral' or 'llama')

        Returns:
            A StrategyResult; an error result after all retries fail.
        """
        # Serve from cache when a fresh entry exists.
        cache_key = self._generate_cache_key(query, context, model)
        cached_response = self._get_from_cache(cache_key)
        if cached_response:
            return self._create_result(cached_response, model, from_cache=True)

        config = self.model_configs[model]
        messages = self._prepare_messages(query, context)

        # Retry with exponential backoff (retry_delay * 2**attempt).
        for attempt in range(config.retry_attempts):
            try:
                response = await self.client.chat.completions.create(
                    model=config.model_name,
                    messages=messages,
                    temperature=config.temperature,
                    top_p=config.top_p,
                    top_k=config.top_k,
                    presence_penalty=config.presence_penalty,
                    frequency_penalty=config.frequency_penalty,
                    max_tokens=config.max_tokens,
                    stream=False
                )

                # Cache the successful response for cache_ttl seconds.
                self.cache[cache_key] = {
                    "content": response.choices[0].message.content,
                    "timestamp": datetime.now()
                }

                return self._create_result(response, model)

            except Exception as e:
                delay = config.retry_delay * (2 ** attempt)
                logger.warning(f"Groq API attempt {attempt + 1} failed: {str(e)}")
                if attempt < config.retry_attempts - 1:
                    await asyncio.sleep(delay)
                else:
                    logger.error(f"All Groq API attempts failed: {str(e)}")
                    return self._create_error_result(str(e))

        # Reached only when retry_attempts <= 0; fail explicitly instead of
        # implicitly returning None (previous behavior).
        return self._create_error_result("No retry attempts configured")

    def _create_result(
        self,
        response: Union[Dict, Any],
        model: str,
        from_cache: bool = False
    ) -> StrategyResult:
        """Build a StrategyResult from an API response or a cache entry.

        Args:
            response: Either the Groq API response object, or (when
                ``from_cache`` is True) a cache dict with "content" and
                "timestamp" keys.
            model: Alias into self.model_configs.
            from_cache: Whether ``response`` came from the cache.
        """
        if from_cache:
            answer = response["content"]
            confidence = 0.9  # Higher confidence for cached responses
            performance_metrics = {
                "from_cache": True,
                "cache_age": (datetime.now() - response["timestamp"]).total_seconds()
            }
        else:
            answer = response.choices[0].message.content
            confidence = self._calculate_confidence(response)
            performance_metrics = {
                # NOTE(review): historical field name -- this value is a
                # token-count proxy (total_tokens / 1000), not a measured
                # latency; kept as-is for metric-consumer compatibility.
                "latency": response.usage.total_tokens / 1000,
                "tokens_used": response.usage.total_tokens,
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "model": self.model_configs[model].model_name
            }

        return StrategyResult(
            strategy_type="groq",
            success=True,
            answer=answer,
            confidence=confidence,
            reasoning_trace=[{
                "step": "groq_api_call",
                "model": self.model_configs[model].model_name,
                "timestamp": datetime.now().isoformat(),
                "metrics": performance_metrics
            }],
            metadata={
                "model": self.model_configs[model].model_name,
                "from_cache": from_cache
            },
            performance_metrics=performance_metrics
        )

    def _create_error_result(self, error: str) -> StrategyResult:
        """Build a failed StrategyResult carrying the error message."""
        return StrategyResult(
            strategy_type="groq",
            success=False,
            answer=None,
            confidence=0.0,
            reasoning_trace=[{
                "step": "groq_api_error",
                "error": error,
                "timestamp": datetime.now().isoformat()
            }],
            metadata={"error": error},
            performance_metrics={}
        )

    def _generate_cache_key(
        self,
        query: str,
        context: Dict[str, Any],
        model: str
    ) -> str:
        """Build a deterministic cache key from the request parameters."""
        key_data = {
            "query": query,
            "context": context,
            "model": model
        }
        # default=str keeps key generation robust for non-JSON-serializable
        # context values (previously raised TypeError and broke the call).
        return json.dumps(key_data, sort_keys=True, default=str)

    def _get_from_cache(self, cache_key: str) -> Optional[Dict]:
        """Return the cached entry if still within TTL; evict it otherwise."""
        if cache_key in self.cache:
            cached = self.cache[cache_key]
            age = (datetime.now() - cached["timestamp"]).total_seconds()
            if age < self.cache_ttl:
                return cached
            else:
                del self.cache[cache_key]
        return None

    def _calculate_confidence(self, response: Any) -> float:
        """Heuristic confidence score derived from token usage and finish reason."""
        confidence = 0.8  # Base confidence

        # Adjust based on token usage and model behavior.
        if hasattr(response, 'usage'):
            completion_tokens = response.usage.completion_tokens
            total_tokens = response.usage.total_tokens

            # Length-based adjustment.
            if completion_tokens < 10:
                confidence *= 0.8  # Reduce confidence for very short responses
            elif completion_tokens > 100:
                confidence *= 1.1  # Increase confidence for detailed responses

            # Token efficiency adjustment (guard against zero total_tokens,
            # which previously raised ZeroDivisionError).
            token_efficiency = (
                completion_tokens / total_tokens if total_tokens else 0.0
            )
            if token_efficiency > 0.5:
                confidence *= 1.1  # Good token efficiency

        # Response completeness check.
        if hasattr(response.choices[0], 'finish_reason'):
            if response.choices[0].finish_reason == "stop":
                confidence *= 1.1  # Natural completion
            elif response.choices[0].finish_reason == "length":
                confidence *= 0.9  # Truncated response

        return min(1.0, max(0.0, confidence))  # Ensure between 0 and 1

    def _prepare_messages(
        self,
        query: str,
        context: Dict[str, Any]
    ) -> List[Dict[str, str]]:
        """Assemble the chat message list: system message, history, then query."""
        messages = []

        # Add system message if provided.
        if "system_message" in context:
            messages.append({
                "role": "system",
                "content": context["system_message"]
            })

        # Add chat history if provided.
        if "chat_history" in context:
            messages.extend(context["chat_history"])

        # Add the current query.
        messages.append({
            "role": "user",
            "content": query
        })

        return messages
|
reasoning/learning.py
ADDED
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Enhanced learning mechanisms for reasoning strategies."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
@dataclass
class LearningEvent:
    """Event for strategy learning."""
    strategy_type: str    # Which reasoning strategy produced the event.
    event_type: str       # Category of the event (e.g. "learning_update").
    data: Dict[str, Any]          # Arbitrary event payload.
    outcome: Optional[float]      # Observed outcome/score; None if unknown.
    timestamp: datetime = field(default_factory=datetime.now)  # Creation time.
20 |
+
|
21 |
+
class LearningMode(Enum):
    """Types of learning modes."""
    SUPERVISED = "supervised"        # Learn from labeled feature/outcome pairs.
    REINFORCEMENT = "reinforcement"  # Learn from state/action/reward signals.
    ACTIVE = "active"                # Query for feedback on uncertain items.
    TRANSFER = "transfer"            # Reuse knowledge from a source task.
    META = "meta"                    # Learn how to pick/tune strategies.
    ENSEMBLE = "ensemble"            # Combine multiple member predictors.
29 |
+
|
30 |
+
@dataclass
class LearningState:
    """State for learning process."""
    mode: LearningMode              # Currently selected learning mode.
    parameters: Dict[str, Any]      # Tunable parameters (learning/exploration rate).
    history: List[LearningEvent]    # Past learning events for this strategy.
    metrics: Dict[str, float]       # Smoothed (EMA) performance metrics.
    metadata: Dict[str, Any] = field(default_factory=dict)  # Free-form extras.
38 |
+
|
39 |
+
class EnhancedLearningManager:
|
40 |
+
"""
|
41 |
+
Advanced learning manager that:
|
42 |
+
1. Implements multiple learning modes
|
43 |
+
2. Tracks learning progress
|
44 |
+
3. Adapts learning parameters
|
45 |
+
4. Optimizes strategy performance
|
46 |
+
5. Transfers knowledge between strategies
|
47 |
+
"""
|
48 |
+
|
49 |
+
def __init__(self,
             learning_rate: float = 0.1,
             exploration_rate: float = 0.2,
             memory_size: int = 1000):
    """Initialize the learning manager.

    Args:
        learning_rate: Base step size for parameter/model updates.
        exploration_rate: Base exploration probability for RL-style modes.
        memory_size: Maximum number of performance records retained.
    """
    self.learning_rate = learning_rate
    self.exploration_rate = exploration_rate
    self.memory_size = memory_size

    # Learning states: one LearningState per strategy type.
    self.states: Dict[str, LearningState] = {}

    # Performance tracking (bounded by memory_size in _record_performance).
    self.performance_history: List[Dict[str, Any]] = []
    self.strategy_metrics: Dict[str, List[float]] = defaultdict(list)

    # Knowledge transfer bookkeeping (used by transfer learning).
    self.knowledge_base: Dict[str, Any] = {}
    self.transfer_history: List[Dict[str, Any]] = []
67 |
+
|
68 |
+
async def learn(self,
                strategy_type: str,
                event: LearningEvent,
                context: Dict[str, Any]) -> Dict[str, Any]:
    """Learn from a strategy execution event.

    Picks a learning mode for the event, dispatches to the matching
    learning routine, then updates the strategy's learning state and
    performance records.  Any failure is logged and reported in the
    returned dict rather than raised.
    """
    mode: Optional[LearningMode] = None
    try:
        # Fetch (or lazily create) the per-strategy learning state.
        state = self._get_learning_state(strategy_type)

        # Decide which learning mode fits this event.
        mode = await self._select_learning_mode(event, state, context)

        # Dispatch table instead of an if/elif ladder.
        handlers = {
            LearningMode.SUPERVISED: self._supervised_learning,
            LearningMode.REINFORCEMENT: self._reinforcement_learning,
            LearningMode.ACTIVE: self._active_learning,
            LearningMode.TRANSFER: self._transfer_learning,
            LearningMode.META: self._meta_learning,
            LearningMode.ENSEMBLE: self._ensemble_learning,
        }
        handler = handlers.get(mode)
        if handler is None:
            raise ValueError(f"Unsupported learning mode: {mode}")

        result = await handler(event, state, context)

        # Fold the outcome back into state and performance records.
        self._update_learning_state(state, result)
        self._record_performance(strategy_type, result)

        return result

    except Exception as e:
        logging.error(f"Error in learning: {str(e)}")
        return {
            "success": False,
            "error": str(e),
            # mode stays None when selection itself failed.
            "mode": mode.value if mode is not None else None
        }
111 |
+
|
112 |
+
async def _supervised_learning(self,
                               event: LearningEvent,
                               state: LearningState,
                               context: Dict[str, Any]) -> Dict[str, Any]:
    """Implement supervised learning: fit on the event, then validate.

    NOTE(review): relies on helpers (_extract_features, _update_model,
    _validate_model) not defined in this module -- confirm they exist.
    """
    # Build the training pair from the event.
    features = await self._extract_features(event.data, context)
    labels = 0.0 if event.outcome is None else event.outcome

    # Fit the model, then measure how well the update generalizes.
    model_update = await self._update_model(features, labels, state, context)
    validation = await self._validate_model(model_update, state, context)

    metrics = {
        "accuracy": validation.get("accuracy", 0.0),
        "loss": validation.get("loss", 0.0),
    }
    return {
        "success": True,
        "mode": LearningMode.SUPERVISED.value,
        "model_update": model_update,
        "validation": validation,
        "metrics": metrics,
    }
137 |
+
|
138 |
+
async def _reinforcement_learning(self,
                                  event: LearningEvent,
                                  state: LearningState,
                                  context: Dict[str, Any]) -> Dict[str, Any]:
    """Implement reinforcement learning: policy and value updates.

    NOTE(review): relies on helpers (_extract_state, _update_policy,
    _update_value_function) not defined in this module -- confirm they exist.
    """
    # Decompose the event into (state, action, reward).
    current_state = await self._extract_state(event.data, context)
    action = event.data.get("action")
    reward = 0.0 if event.outcome is None else event.outcome

    # Improve the policy, then the value estimate, from the observation.
    policy_update = await self._update_policy(
        current_state, action, reward, state, context)
    value_update = await self._update_value_function(
        current_state, reward, state, context)

    return {
        "success": True,
        "mode": LearningMode.REINFORCEMENT.value,
        "policy_update": policy_update,
        "value_update": value_update,
        "metrics": {
            "reward": reward,
            "value_error": value_update.get("error", 0.0),
        },
    }
166 |
+
|
167 |
+
async def _active_learning(self,
                           event: LearningEvent,
                           state: LearningState,
                           context: Dict[str, Any]) -> Dict[str, Any]:
    """Implement active learning: query, collect feedback, update.

    NOTE(review): relies on helpers (_select_query, _get_feedback,
    _update_model_active) not defined in this module -- confirm they exist.
    """
    # Pick the most informative query, then obtain feedback for it.
    query = await self._select_query(event.data, state, context)
    feedback = await self._get_feedback(query, context)

    # Incorporate the feedback into the model.
    model_update = await self._update_model_active(
        query, feedback, state, context)

    return {
        "success": True,
        "mode": LearningMode.ACTIVE.value,
        "query": query,
        "feedback": feedback,
        "model_update": model_update,
        "metrics": {
            "uncertainty": query.get("uncertainty", 0.0),
            "feedback_quality": feedback.get("quality", 0.0),
        },
    }
193 |
+
|
194 |
+
async def _transfer_learning(self,
                             event: LearningEvent,
                             state: LearningState,
                             context: Dict[str, Any]) -> Dict[str, Any]:
    """Implement transfer learning: extract, adapt, and apply knowledge.

    NOTE(review): relies on helpers (_select_source_task, _extract_knowledge,
    _adapt_knowledge, _apply_transfer) not defined in this module --
    confirm they exist.
    """
    # Choose which source task to transfer from.
    source_task = await self._select_source_task(event.data, state, context)

    # Pull reusable knowledge out of the source task.
    knowledge = await self._extract_knowledge(source_task, context)

    # Adapt that knowledge to the current event, then apply it.
    adaptation = await self._adapt_knowledge(
        knowledge, event.data, state, context)
    transfer = await self._apply_transfer(adaptation, state, context)

    return {
        "success": True,
        "mode": LearningMode.TRANSFER.value,
        "source_task": source_task,
        "knowledge": knowledge,
        "adaptation": adaptation,
        "transfer": transfer,
        "metrics": {
            "transfer_efficiency": transfer.get("efficiency", 0.0),
            "adaptation_quality": adaptation.get("quality", 0.0),
        },
    }
224 |
+
|
225 |
+
async def _meta_learning(self,
                         event: LearningEvent,
                         state: LearningState,
                         context: Dict[str, Any]) -> Dict[str, Any]:
    """Implement meta-learning: characterize, select, optimize, apply.

    NOTE(review): relies on helpers (_characterize_task, _select_strategy,
    _optimize_parameters, _apply_meta_learning) not defined in this
    module -- confirm they exist.
    """
    # Describe the task so a strategy can be matched to it.
    task_char = await self._characterize_task(event.data, context)

    # Choose the strategy best suited to this task profile.
    strategy = await self._select_strategy(task_char, state, context)

    # Tune the chosen strategy's parameters, then commit the result.
    optimization = await self._optimize_parameters(
        strategy, task_char, state, context)
    meta_update = await self._apply_meta_learning(
        optimization, state, context)

    return {
        "success": True,
        "mode": LearningMode.META.value,
        "task_characterization": task_char,
        "strategy": strategy,
        "optimization": optimization,
        "meta_update": meta_update,
        "metrics": {
            "strategy_fit": strategy.get("fit_score", 0.0),
            "optimization_improvement": optimization.get("improvement", 0.0),
        },
    }
256 |
+
|
257 |
+
async def _ensemble_learning(self,
                             event: LearningEvent,
                             state: LearningState,
                             context: Dict[str, Any]) -> Dict[str, Any]:
    """Implement ensemble learning: pick members, weight, combine.

    NOTE(review): relies on helpers (_select_members, _optimize_weights,
    _combine_predictions) not defined in this module -- confirm they exist.
    """
    # Select which member models take part in the ensemble.
    members = await self._select_members(event.data, state, context)

    # Find the weighting that best balances the members.
    weights = await self._optimize_weights(members, state, context)

    # Merge the members' predictions under those weights.
    combination = await self._combine_predictions(
        members, weights, event.data, context)

    return {
        "success": True,
        "mode": LearningMode.ENSEMBLE.value,
        "members": members,
        "weights": weights,
        "combination": combination,
        "metrics": {
            "ensemble_diversity": weights.get("diversity", 0.0),
            "combination_strength": combination.get("strength", 0.0),
        },
    }
283 |
+
|
284 |
+
def _get_learning_state(self, strategy_type: str) -> LearningState:
    """Return the learning state for *strategy_type*, creating a default one on first use."""
    state = self.states.get(strategy_type)
    if state is None:
        # First time we see this strategy: start in supervised mode with
        # the manager-wide default rates.
        state = LearningState(
            mode=LearningMode.SUPERVISED,
            parameters={
                "learning_rate": self.learning_rate,
                "exploration_rate": self.exploration_rate
            },
            history=[],
            metrics={}
        )
        self.states[strategy_type] = state
    return state
297 |
+
|
298 |
+
def _update_learning_state(self, state: LearningState, result: Dict[str, Any]):
    """Update learning state with a learning result.

    Appends a history event, folds result metrics into the state via an
    exponential moving average, and adapts the learning parameters.
    """
    # Record the update in the per-strategy history.  (timestamp comes
    # from LearningEvent's default_factory, so passing it explicitly was
    # redundant.)
    state.history.append(LearningEvent(
        strategy_type=result.get("strategy_type", "unknown"),
        event_type="learning_update",
        data=result,
        outcome=result.get("metrics", {}).get("accuracy", 0.0)
    ))

    # Bound history growth the same way _record_performance bounds the
    # global performance history -- previously this list grew without
    # limit for long-running managers.
    if len(state.history) > self.memory_size:
        state.history = state.history[-self.memory_size:]

    # Fold metrics into the state with an exponential moving average so
    # a single noisy result cannot swing the smoothed value too far.
    for metric, value in result.get("metrics", {}).items():
        if metric in state.metrics:
            state.metrics[metric] = (
                0.9 * state.metrics[metric] + 0.1 * value
            )
        else:
            state.metrics[metric] = value

    # Let parameters (learning/exploration rate) react to the result.
    self._adapt_parameters(state, result)
320 |
+
|
321 |
+
def _record_performance(self, strategy_type: str, result: Dict[str, Any]):
|
322 |
+
"""Record learning performance."""
|
323 |
+
self.performance_history.append({
|
324 |
+
"timestamp": datetime.now().isoformat(),
|
325 |
+
"strategy_type": strategy_type,
|
326 |
+
"mode": result.get("mode"),
|
327 |
+
"metrics": result.get("metrics", {}),
|
328 |
+
"success": result.get("success", False)
|
329 |
+
})
|
330 |
+
|
331 |
+
# Update strategy metrics
|
332 |
+
for metric, value in result.get("metrics", {}).items():
|
333 |
+
self.strategy_metrics[f"{strategy_type}_{metric}"].append(value)
|
334 |
+
|
335 |
+
# Maintain memory size
|
336 |
+
if len(self.performance_history) > self.memory_size:
|
337 |
+
self.performance_history = self.performance_history[-self.memory_size:]
|
338 |
+
|
339 |
+
def _adapt_parameters(self, state: LearningState, result: Dict[str, Any]):
|
340 |
+
"""Adapt learning parameters based on performance."""
|
341 |
+
# Adapt learning rate
|
342 |
+
if "accuracy" in result.get("metrics", {}):
|
343 |
+
accuracy = result["metrics"]["accuracy"]
|
344 |
+
if accuracy > 0.8:
|
345 |
+
state.parameters["learning_rate"] *= 0.95 # Decrease if performing well
|
346 |
+
elif accuracy < 0.6:
|
347 |
+
state.parameters["learning_rate"] *= 1.05 # Increase if performing poorly
|
348 |
+
|
349 |
+
# Adapt exploration rate
|
350 |
+
if "reward" in result.get("metrics", {}):
|
351 |
+
reward = result["metrics"]["reward"]
|
352 |
+
if reward > 0:
|
353 |
+
state.parameters["exploration_rate"] *= 0.95 # Decrease if getting rewards
|
354 |
+
else:
|
355 |
+
state.parameters["exploration_rate"] *= 1.05 # Increase if not getting rewards
|
356 |
+
|
357 |
+
# Clip parameters to reasonable ranges
|
358 |
+
state.parameters["learning_rate"] = np.clip(
|
359 |
+
state.parameters["learning_rate"], 0.001, 0.5)
|
360 |
+
state.parameters["exploration_rate"] = np.clip(
|
361 |
+
state.parameters["exploration_rate"], 0.01, 0.5)
|
362 |
+
|
363 |
+
def get_performance_metrics(self) -> Dict[str, Any]:
|
364 |
+
"""Get comprehensive performance metrics."""
|
365 |
+
return {
|
366 |
+
"learning_states": {
|
367 |
+
strategy_type: {
|
368 |
+
"mode": state.mode.value,
|
369 |
+
"parameters": state.parameters,
|
370 |
+
"metrics": state.metrics
|
371 |
+
}
|
372 |
+
for strategy_type, state in self.states.items()
|
373 |
+
},
|
374 |
+
"strategy_performance": {
|
375 |
+
metric: {
|
376 |
+
"mean": np.mean(values) if values else 0.0,
|
377 |
+
"std": np.std(values) if values else 0.0,
|
378 |
+
"min": min(values) if values else 0.0,
|
379 |
+
"max": max(values) if values else 0.0
|
380 |
+
}
|
381 |
+
for metric, values in self.strategy_metrics.items()
|
382 |
+
},
|
383 |
+
"transfer_metrics": {
|
384 |
+
"total_transfers": len(self.transfer_history),
|
385 |
+
"success_rate": sum(1 for t in self.transfer_history if t.get("success", False)) / len(self.transfer_history) if self.transfer_history else 0
|
386 |
+
}
|
387 |
+
}
|
388 |
+
|
389 |
+
def clear_history(self):
|
390 |
+
"""Clear learning history and reset states."""
|
391 |
+
self.states.clear()
|
392 |
+
self.performance_history.clear()
|
393 |
+
self.strategy_metrics.clear()
|
394 |
+
self.transfer_history.clear()
|
reasoning/local_llm.py
ADDED
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Local LLM integration for the reasoning system."""
|
2 |
+
|
3 |
+
import os
|
4 |
+
from typing import Dict, Any, Optional
|
5 |
+
from datetime import datetime
|
6 |
+
import logging
|
7 |
+
from llama_cpp import Llama
|
8 |
+
import huggingface_hub
|
9 |
+
from .base import ReasoningStrategy
|
10 |
+
from .model_manager import ModelManager, ModelType
|
11 |
+
|
12 |
+
class LocalLLMStrategy(ReasoningStrategy):
|
13 |
+
"""Implements reasoning using local LLM."""
|
14 |
+
|
15 |
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
|
16 |
+
"""Initialize the local LLM strategy."""
|
17 |
+
super().__init__()
|
18 |
+
self.config = config or {}
|
19 |
+
|
20 |
+
# Initialize model manager
|
21 |
+
self.model_manager = ModelManager(self.config.get('model_dir', "models"))
|
22 |
+
|
23 |
+
# Standard reasoning parameters
|
24 |
+
self.min_confidence = self.config.get('min_confidence', 0.7)
|
25 |
+
self.parallel_threshold = self.config.get('parallel_threshold', 3)
|
26 |
+
self.learning_rate = self.config.get('learning_rate', 0.1)
|
27 |
+
self.strategy_weights = self.config.get('strategy_weights', {
|
28 |
+
"LOCAL_LLM": 0.8,
|
29 |
+
"CHAIN_OF_THOUGHT": 0.6,
|
30 |
+
"TREE_OF_THOUGHTS": 0.5,
|
31 |
+
"META_LEARNING": 0.4
|
32 |
+
})
|
33 |
+
|
34 |
+
self.logger = logging.getLogger(__name__)
|
35 |
+
|
36 |
+
async def initialize(self):
|
37 |
+
"""Initialize all models."""
|
38 |
+
await self.model_manager.initialize_all_models()
|
39 |
+
|
40 |
+
async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
|
41 |
+
"""Generate reasoning response using appropriate local LLM."""
|
42 |
+
try:
|
43 |
+
# Determine best model for the task
|
44 |
+
task_type = context.get('task_type', 'general')
|
45 |
+
model_key = self.model_manager.get_best_model_for_task(task_type)
|
46 |
+
|
47 |
+
# Get or initialize the model
|
48 |
+
model = await self.model_manager.get_model(model_key)
|
49 |
+
if not model:
|
50 |
+
raise Exception(f"Failed to initialize {model_key} model")
|
51 |
+
|
52 |
+
# Format prompt with context
|
53 |
+
prompt = self._format_prompt(query, context)
|
54 |
+
|
55 |
+
# Generate response
|
56 |
+
response = model(
|
57 |
+
prompt,
|
58 |
+
max_tokens=1024 if model.n_ctx >= 4096 else 512,
|
59 |
+
temperature=0.7,
|
60 |
+
top_p=0.95,
|
61 |
+
repeat_penalty=1.1,
|
62 |
+
echo=False
|
63 |
+
)
|
64 |
+
|
65 |
+
# Extract and structure the response
|
66 |
+
result = self._parse_response(response['choices'][0]['text'])
|
67 |
+
|
68 |
+
return {
|
69 |
+
'success': True,
|
70 |
+
'answer': result['answer'],
|
71 |
+
'reasoning': result['reasoning'],
|
72 |
+
'confidence': result['confidence'],
|
73 |
+
'timestamp': datetime.now(),
|
74 |
+
'metadata': {
|
75 |
+
'model': model_key,
|
76 |
+
'strategy': 'local_llm',
|
77 |
+
'context_length': len(prompt),
|
78 |
+
'response_length': len(response['choices'][0]['text'])
|
79 |
+
}
|
80 |
+
}
|
81 |
+
|
82 |
+
except Exception as e:
|
83 |
+
self.logger.error(f"Error in reasoning: {e}")
|
84 |
+
return {
|
85 |
+
'success': False,
|
86 |
+
'error': str(e),
|
87 |
+
'timestamp': datetime.now()
|
88 |
+
}
|
89 |
+
|
90 |
+
def _format_prompt(self, query: str, context: Dict[str, Any]) -> str:
|
91 |
+
"""Format the prompt with query and context."""
|
92 |
+
# Include relevant context
|
93 |
+
context_str = "\n".join([
|
94 |
+
f"{k}: {v}" for k, v in context.items()
|
95 |
+
if k in ['objective', 'constraints', 'background']
|
96 |
+
])
|
97 |
+
|
98 |
+
return f"""Let's solve this problem step by step.
|
99 |
+
|
100 |
+
Context:
|
101 |
+
{context_str}
|
102 |
+
|
103 |
+
Question: {query}
|
104 |
+
|
105 |
+
Let me break this down:
|
106 |
+
1."""
|
107 |
+
|
108 |
+
def _parse_response(self, text: str) -> Dict[str, Any]:
|
109 |
+
"""Parse the response into structured output."""
|
110 |
+
# Simple parsing for now
|
111 |
+
lines = text.strip().split('\n')
|
112 |
+
|
113 |
+
return {
|
114 |
+
'answer': lines[-1] if lines else '',
|
115 |
+
'reasoning': '\n'.join(lines[:-1]) if len(lines) > 1 else '',
|
116 |
+
'confidence': 0.8 # Default confidence
|
117 |
+
}
|
reasoning/market_analysis.py
ADDED
@@ -0,0 +1,450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced market analysis tools for venture strategies."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
|
15 |
+
class MarketSegment:
|
16 |
+
"""Market segment analysis."""
|
17 |
+
size: float
|
18 |
+
growth_rate: float
|
19 |
+
cagr: float
|
20 |
+
competition: List[Dict[str, Any]]
|
21 |
+
barriers: List[str]
|
22 |
+
opportunities: List[str]
|
23 |
+
risks: List[str]
|
24 |
+
|
25 |
+
@dataclass
|
26 |
+
class CompetitorAnalysis:
|
27 |
+
"""Competitor analysis."""
|
28 |
+
name: str
|
29 |
+
market_share: float
|
30 |
+
strengths: List[str]
|
31 |
+
weaknesses: List[str]
|
32 |
+
strategy: str
|
33 |
+
revenue: Optional[float]
|
34 |
+
valuation: Optional[float]
|
35 |
+
|
36 |
+
@dataclass
|
37 |
+
class MarketTrend:
|
38 |
+
"""Market trend analysis."""
|
39 |
+
name: str
|
40 |
+
impact: float
|
41 |
+
timeline: str
|
42 |
+
adoption_rate: float
|
43 |
+
market_potential: float
|
44 |
+
risk_level: float
|
45 |
+
|
46 |
+
class MarketAnalyzer:
|
47 |
+
"""
|
48 |
+
Advanced market analysis toolkit that:
|
49 |
+
1. Analyzes market segments
|
50 |
+
2. Tracks competitors
|
51 |
+
3. Identifies trends
|
52 |
+
4. Predicts opportunities
|
53 |
+
5. Assesses risks
|
54 |
+
"""
|
55 |
+
|
56 |
+
def __init__(self):
|
57 |
+
self.segments: Dict[str, MarketSegment] = {}
|
58 |
+
self.competitors: Dict[str, CompetitorAnalysis] = {}
|
59 |
+
self.trends: List[MarketTrend] = []
|
60 |
+
|
61 |
+
async def analyze_market(self,
|
62 |
+
segment: str,
|
63 |
+
context: Dict[str, Any]) -> Dict[str, Any]:
|
64 |
+
"""Perform comprehensive market analysis."""
|
65 |
+
try:
|
66 |
+
# Segment analysis
|
67 |
+
segment_analysis = await self._analyze_segment(segment, context)
|
68 |
+
|
69 |
+
# Competitor analysis
|
70 |
+
competitor_analysis = await self._analyze_competitors(segment, context)
|
71 |
+
|
72 |
+
# Trend analysis
|
73 |
+
trend_analysis = await self._analyze_trends(segment, context)
|
74 |
+
|
75 |
+
# Opportunity analysis
|
76 |
+
opportunity_analysis = await self._analyze_opportunities(
|
77 |
+
segment_analysis, competitor_analysis, trend_analysis, context)
|
78 |
+
|
79 |
+
# Risk analysis
|
80 |
+
risk_analysis = await self._analyze_risks(
|
81 |
+
segment_analysis, competitor_analysis, trend_analysis, context)
|
82 |
+
|
83 |
+
return {
|
84 |
+
"success": True,
|
85 |
+
"segment_analysis": segment_analysis,
|
86 |
+
"competitor_analysis": competitor_analysis,
|
87 |
+
"trend_analysis": trend_analysis,
|
88 |
+
"opportunity_analysis": opportunity_analysis,
|
89 |
+
"risk_analysis": risk_analysis,
|
90 |
+
"metrics": {
|
91 |
+
"market_score": self._calculate_market_score(segment_analysis),
|
92 |
+
"opportunity_score": self._calculate_opportunity_score(opportunity_analysis),
|
93 |
+
"risk_score": self._calculate_risk_score(risk_analysis)
|
94 |
+
}
|
95 |
+
}
|
96 |
+
except Exception as e:
|
97 |
+
logging.error(f"Error in market analysis: {str(e)}")
|
98 |
+
return {"success": False, "error": str(e)}
|
99 |
+
|
100 |
+
async def _analyze_segment(self,
|
101 |
+
segment: str,
|
102 |
+
context: Dict[str, Any]) -> Dict[str, Any]:
|
103 |
+
"""Analyze market segment."""
|
104 |
+
prompt = f"""
|
105 |
+
Analyze market segment:
|
106 |
+
Segment: {segment}
|
107 |
+
Context: {json.dumps(context)}
|
108 |
+
|
109 |
+
Analyze:
|
110 |
+
1. Market size and growth
|
111 |
+
2. Customer segments
|
112 |
+
3. Value chain
|
113 |
+
4. Entry barriers
|
114 |
+
5. Competitive dynamics
|
115 |
+
|
116 |
+
Format as:
|
117 |
+
[Analysis]
|
118 |
+
Size: ...
|
119 |
+
Growth: ...
|
120 |
+
Segments: ...
|
121 |
+
Value_Chain: ...
|
122 |
+
Barriers: ...
|
123 |
+
"""
|
124 |
+
|
125 |
+
response = await context["groq_api"].predict(prompt)
|
126 |
+
return self._parse_segment_analysis(response["answer"])
|
127 |
+
|
128 |
+
async def _analyze_competitors(self,
|
129 |
+
segment: str,
|
130 |
+
context: Dict[str, Any]) -> Dict[str, Any]:
|
131 |
+
"""Analyze competitors in segment."""
|
132 |
+
prompt = f"""
|
133 |
+
Analyze competitors:
|
134 |
+
Segment: {segment}
|
135 |
+
Context: {json.dumps(context)}
|
136 |
+
|
137 |
+
For each competitor analyze:
|
138 |
+
1. Market share
|
139 |
+
2. Business model
|
140 |
+
3. Strengths/weaknesses
|
141 |
+
4. Strategy
|
142 |
+
5. Performance metrics
|
143 |
+
|
144 |
+
Format as:
|
145 |
+
[Competitor1]
|
146 |
+
Share: ...
|
147 |
+
Model: ...
|
148 |
+
Strengths: ...
|
149 |
+
Weaknesses: ...
|
150 |
+
Strategy: ...
|
151 |
+
Metrics: ...
|
152 |
+
"""
|
153 |
+
|
154 |
+
response = await context["groq_api"].predict(prompt)
|
155 |
+
return self._parse_competitor_analysis(response["answer"])
|
156 |
+
|
157 |
+
async def _analyze_trends(self,
|
158 |
+
segment: str,
|
159 |
+
context: Dict[str, Any]) -> Dict[str, Any]:
|
160 |
+
"""Analyze market trends."""
|
161 |
+
prompt = f"""
|
162 |
+
Analyze market trends:
|
163 |
+
Segment: {segment}
|
164 |
+
Context: {json.dumps(context)}
|
165 |
+
|
166 |
+
Analyze trends in:
|
167 |
+
1. Technology
|
168 |
+
2. Customer behavior
|
169 |
+
3. Business models
|
170 |
+
4. Regulation
|
171 |
+
5. Market dynamics
|
172 |
+
|
173 |
+
Format as:
|
174 |
+
[Trend1]
|
175 |
+
Type: ...
|
176 |
+
Impact: ...
|
177 |
+
Timeline: ...
|
178 |
+
Adoption: ...
|
179 |
+
Potential: ...
|
180 |
+
"""
|
181 |
+
|
182 |
+
response = await context["groq_api"].predict(prompt)
|
183 |
+
return self._parse_trend_analysis(response["answer"])
|
184 |
+
|
185 |
+
async def _analyze_opportunities(self,
|
186 |
+
segment_analysis: Dict[str, Any],
|
187 |
+
competitor_analysis: Dict[str, Any],
|
188 |
+
trend_analysis: Dict[str, Any],
|
189 |
+
context: Dict[str, Any]) -> Dict[str, Any]:
|
190 |
+
"""Analyze market opportunities."""
|
191 |
+
prompt = f"""
|
192 |
+
Analyze market opportunities:
|
193 |
+
Segment: {json.dumps(segment_analysis)}
|
194 |
+
Competitors: {json.dumps(competitor_analysis)}
|
195 |
+
Trends: {json.dumps(trend_analysis)}
|
196 |
+
Context: {json.dumps(context)}
|
197 |
+
|
198 |
+
Identify opportunities in:
|
199 |
+
1. Unmet needs
|
200 |
+
2. Market gaps
|
201 |
+
3. Innovation potential
|
202 |
+
4. Scaling potential
|
203 |
+
5. Value creation
|
204 |
+
|
205 |
+
Format as:
|
206 |
+
[Opportunity1]
|
207 |
+
Type: ...
|
208 |
+
Description: ...
|
209 |
+
Potential: ...
|
210 |
+
Requirements: ...
|
211 |
+
Timeline: ...
|
212 |
+
"""
|
213 |
+
|
214 |
+
response = await context["groq_api"].predict(prompt)
|
215 |
+
return self._parse_opportunity_analysis(response["answer"])
|
216 |
+
|
217 |
+
async def _analyze_risks(self,
|
218 |
+
segment_analysis: Dict[str, Any],
|
219 |
+
competitor_analysis: Dict[str, Any],
|
220 |
+
trend_analysis: Dict[str, Any],
|
221 |
+
context: Dict[str, Any]) -> Dict[str, Any]:
|
222 |
+
"""Analyze market risks."""
|
223 |
+
prompt = f"""
|
224 |
+
Analyze market risks:
|
225 |
+
Segment: {json.dumps(segment_analysis)}
|
226 |
+
Competitors: {json.dumps(competitor_analysis)}
|
227 |
+
Trends: {json.dumps(trend_analysis)}
|
228 |
+
Context: {json.dumps(context)}
|
229 |
+
|
230 |
+
Analyze risks in:
|
231 |
+
1. Market dynamics
|
232 |
+
2. Competition
|
233 |
+
3. Technology
|
234 |
+
4. Regulation
|
235 |
+
5. Execution
|
236 |
+
|
237 |
+
Format as:
|
238 |
+
[Risk1]
|
239 |
+
Type: ...
|
240 |
+
Description: ...
|
241 |
+
Impact: ...
|
242 |
+
Probability: ...
|
243 |
+
Mitigation: ...
|
244 |
+
"""
|
245 |
+
|
246 |
+
response = await context["groq_api"].predict(prompt)
|
247 |
+
return self._parse_risk_analysis(response["answer"])
|
248 |
+
|
249 |
+
def _calculate_market_score(self, analysis: Dict[str, Any]) -> float:
|
250 |
+
"""Calculate market attractiveness score."""
|
251 |
+
weights = {
|
252 |
+
"size": 0.3,
|
253 |
+
"growth": 0.3,
|
254 |
+
"competition": 0.2,
|
255 |
+
"barriers": 0.1,
|
256 |
+
"dynamics": 0.1
|
257 |
+
}
|
258 |
+
|
259 |
+
scores = {
|
260 |
+
"size": min(analysis.get("size", 0) / 1e9, 1.0), # Normalize to 1B
|
261 |
+
"growth": min(analysis.get("growth", 0) / 30, 1.0), # Normalize to 30%
|
262 |
+
"competition": 1.0 - min(len(analysis.get("competitors", [])) / 10, 1.0),
|
263 |
+
"barriers": 1.0 - min(len(analysis.get("barriers", [])) / 5, 1.0),
|
264 |
+
"dynamics": analysis.get("dynamics_score", 0.5)
|
265 |
+
}
|
266 |
+
|
267 |
+
return sum(weights[k] * scores[k] for k in weights)
|
268 |
+
|
269 |
+
def _calculate_opportunity_score(self, analysis: Dict[str, Any]) -> float:
|
270 |
+
"""Calculate opportunity attractiveness score."""
|
271 |
+
weights = {
|
272 |
+
"market_potential": 0.3,
|
273 |
+
"innovation_potential": 0.2,
|
274 |
+
"execution_feasibility": 0.2,
|
275 |
+
"competitive_advantage": 0.2,
|
276 |
+
"timing": 0.1
|
277 |
+
}
|
278 |
+
|
279 |
+
scores = {
|
280 |
+
"market_potential": analysis.get("market_potential", 0.5),
|
281 |
+
"innovation_potential": analysis.get("innovation_potential", 0.5),
|
282 |
+
"execution_feasibility": analysis.get("execution_feasibility", 0.5),
|
283 |
+
"competitive_advantage": analysis.get("competitive_advantage", 0.5),
|
284 |
+
"timing": analysis.get("timing_score", 0.5)
|
285 |
+
}
|
286 |
+
|
287 |
+
return sum(weights[k] * scores[k] for k in weights)
|
288 |
+
|
289 |
+
def _calculate_risk_score(self, analysis: Dict[str, Any]) -> float:
|
290 |
+
"""Calculate risk level score."""
|
291 |
+
weights = {
|
292 |
+
"market_risk": 0.2,
|
293 |
+
"competition_risk": 0.2,
|
294 |
+
"technology_risk": 0.2,
|
295 |
+
"regulatory_risk": 0.2,
|
296 |
+
"execution_risk": 0.2
|
297 |
+
}
|
298 |
+
|
299 |
+
scores = {
|
300 |
+
"market_risk": analysis.get("market_risk", 0.5),
|
301 |
+
"competition_risk": analysis.get("competition_risk", 0.5),
|
302 |
+
"technology_risk": analysis.get("technology_risk", 0.5),
|
303 |
+
"regulatory_risk": analysis.get("regulatory_risk", 0.5),
|
304 |
+
"execution_risk": analysis.get("execution_risk", 0.5)
|
305 |
+
}
|
306 |
+
|
307 |
+
return sum(weights[k] * scores[k] for k in weights)
|
308 |
+
|
309 |
+
def get_market_insights(self) -> Dict[str, Any]:
|
310 |
+
"""Get comprehensive market insights."""
|
311 |
+
return {
|
312 |
+
"segment_insights": {
|
313 |
+
segment: {
|
314 |
+
"size": s.size,
|
315 |
+
"growth_rate": s.growth_rate,
|
316 |
+
"cagr": s.cagr,
|
317 |
+
"opportunity_score": self._calculate_market_score({
|
318 |
+
"size": s.size,
|
319 |
+
"growth": s.growth_rate,
|
320 |
+
"competitors": s.competition,
|
321 |
+
"barriers": s.barriers
|
322 |
+
})
|
323 |
+
}
|
324 |
+
for segment, s in self.segments.items()
|
325 |
+
},
|
326 |
+
"competitor_insights": {
|
327 |
+
competitor: {
|
328 |
+
"market_share": c.market_share,
|
329 |
+
"strength_score": len(c.strengths) / (len(c.strengths) + len(c.weaknesses)),
|
330 |
+
"revenue": c.revenue,
|
331 |
+
"valuation": c.valuation
|
332 |
+
}
|
333 |
+
for competitor, c in self.competitors.items()
|
334 |
+
},
|
335 |
+
"trend_insights": [
|
336 |
+
{
|
337 |
+
"name": t.name,
|
338 |
+
"impact": t.impact,
|
339 |
+
"potential": t.market_potential,
|
340 |
+
"risk": t.risk_level
|
341 |
+
}
|
342 |
+
for t in self.trends
|
343 |
+
]
|
344 |
+
}
|
345 |
+
|
346 |
+
class MarketAnalysisStrategy(ReasoningStrategy):
|
347 |
+
"""
|
348 |
+
Advanced market analysis strategy that combines multiple analytical tools
|
349 |
+
to provide comprehensive market insights.
|
350 |
+
"""
|
351 |
+
|
352 |
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
|
353 |
+
"""Initialize market analysis strategy."""
|
354 |
+
super().__init__()
|
355 |
+
self.config = config or {}
|
356 |
+
self.analyzer = MarketAnalyzer()
|
357 |
+
|
358 |
+
async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
|
359 |
+
"""
|
360 |
+
Perform market analysis based on query and context.
|
361 |
+
|
362 |
+
Args:
|
363 |
+
query: The market analysis query
|
364 |
+
context: Additional context and parameters
|
365 |
+
|
366 |
+
Returns:
|
367 |
+
Dict containing market analysis results and confidence scores
|
368 |
+
"""
|
369 |
+
try:
|
370 |
+
# Extract market segment from query/context
|
371 |
+
segment = self._extract_segment(query, context)
|
372 |
+
|
373 |
+
# Perform market analysis
|
374 |
+
analysis = await self._analyze_market(segment, context)
|
375 |
+
|
376 |
+
# Get insights
|
377 |
+
insights = self.analyzer.get_market_insights()
|
378 |
+
|
379 |
+
# Calculate confidence based on data quality and completeness
|
380 |
+
confidence = self._calculate_confidence(analysis, insights)
|
381 |
+
|
382 |
+
return {
|
383 |
+
'answer': self._format_insights(insights),
|
384 |
+
'confidence': confidence,
|
385 |
+
'analysis': analysis,
|
386 |
+
'insights': insights,
|
387 |
+
'segment': segment
|
388 |
+
}
|
389 |
+
|
390 |
+
except Exception as e:
|
391 |
+
logging.error(f"Market analysis failed: {str(e)}")
|
392 |
+
return {
|
393 |
+
'error': f"Market analysis failed: {str(e)}",
|
394 |
+
'confidence': 0.0
|
395 |
+
}
|
396 |
+
|
397 |
+
def _extract_segment(self, query: str, context: Dict[str, Any]) -> str:
|
398 |
+
"""Extract market segment from query and context."""
|
399 |
+
# Use context if available
|
400 |
+
if 'segment' in context:
|
401 |
+
return context['segment']
|
402 |
+
|
403 |
+
# Default to general market
|
404 |
+
return 'general'
|
405 |
+
|
406 |
+
async def _analyze_market(self, segment: str, context: Dict[str, Any]) -> Dict[str, Any]:
|
407 |
+
"""Perform comprehensive market analysis."""
|
408 |
+
return await self.analyzer.analyze_market(segment, context)
|
409 |
+
|
410 |
+
def _calculate_confidence(self, analysis: Dict[str, Any], insights: Dict[str, Any]) -> float:
|
411 |
+
"""Calculate confidence score based on analysis quality."""
|
412 |
+
# Base confidence
|
413 |
+
confidence = 0.5
|
414 |
+
|
415 |
+
# Adjust based on data completeness
|
416 |
+
if analysis.get('segment_analysis'):
|
417 |
+
confidence += 0.1
|
418 |
+
if analysis.get('competitor_analysis'):
|
419 |
+
confidence += 0.1
|
420 |
+
if analysis.get('trend_analysis'):
|
421 |
+
confidence += 0.1
|
422 |
+
|
423 |
+
# Adjust based on insight quality
|
424 |
+
if insights.get('opportunities'):
|
425 |
+
confidence += 0.1
|
426 |
+
if insights.get('risks'):
|
427 |
+
confidence += 0.1
|
428 |
+
|
429 |
+
return min(confidence, 1.0)
|
430 |
+
|
431 |
+
def _format_insights(self, insights: Dict[str, Any]) -> str:
|
432 |
+
"""Format market insights into readable text."""
|
433 |
+
sections = []
|
434 |
+
|
435 |
+
if 'market_overview' in insights:
|
436 |
+
sections.append(f"Market Overview: {insights['market_overview']}")
|
437 |
+
|
438 |
+
if 'opportunities' in insights:
|
439 |
+
opps = insights['opportunities']
|
440 |
+
sections.append("Key Opportunities:\n- " + "\n- ".join(opps))
|
441 |
+
|
442 |
+
if 'risks' in insights:
|
443 |
+
risks = insights['risks']
|
444 |
+
sections.append("Key Risks:\n- " + "\n- ".join(risks))
|
445 |
+
|
446 |
+
if 'recommendations' in insights:
|
447 |
+
recs = insights['recommendations']
|
448 |
+
sections.append("Recommendations:\n- " + "\n- ".join(recs))
|
449 |
+
|
450 |
+
return "\n\n".join(sections)
|
reasoning/meta_learning.py
ADDED
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced meta-learning strategy for adaptive reasoning."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
|
15 |
+
class MetaTask:
|
16 |
+
"""Meta-learning task with parameters and performance metrics."""
|
17 |
+
name: str
|
18 |
+
parameters: Dict[str, Any]
|
19 |
+
metrics: Dict[str, float]
|
20 |
+
history: List[Dict[str, Any]] = field(default_factory=list)
|
21 |
+
|
22 |
+
class MetaLearningStrategy(ReasoningStrategy):
|
23 |
+
"""
|
24 |
+
Advanced meta-learning strategy that:
|
25 |
+
1. Adapts to new tasks
|
26 |
+
2. Learns from experience
|
27 |
+
3. Optimizes parameters
|
28 |
+
4. Transfers knowledge
|
29 |
+
5. Improves over time
|
30 |
+
"""
|
31 |
+
|
32 |
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
|
33 |
+
"""Initialize meta-learning strategy."""
|
34 |
+
super().__init__()
|
35 |
+
self.config = config or {}
|
36 |
+
|
37 |
+
# Configure parameters
|
38 |
+
self.learning_rate = self.config.get('learning_rate', 0.01)
|
39 |
+
self.memory_size = self.config.get('memory_size', 100)
|
40 |
+
self.adaptation_threshold = self.config.get('adaptation_threshold', 0.7)
|
41 |
+
|
42 |
+
# Initialize task memory
|
43 |
+
self.task_memory: List[MetaTask] = []
|
44 |
+
|
45 |
+
async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
|
46 |
+
"""
|
47 |
+
Apply meta-learning to adapt and optimize reasoning.
|
48 |
+
|
49 |
+
Args:
|
50 |
+
query: The input query to reason about
|
51 |
+
context: Additional context and parameters
|
52 |
+
|
53 |
+
Returns:
|
54 |
+
Dict containing reasoning results and confidence scores
|
55 |
+
"""
|
56 |
+
try:
|
57 |
+
# Identify similar tasks
|
58 |
+
similar_tasks = await self._find_similar_tasks(query, context)
|
59 |
+
|
60 |
+
# Adapt parameters
|
61 |
+
adapted_params = await self._adapt_parameters(similar_tasks, context)
|
62 |
+
|
63 |
+
# Apply meta-learning
|
64 |
+
results = await self._apply_meta_learning(
|
65 |
+
query,
|
66 |
+
adapted_params,
|
67 |
+
context
|
68 |
+
)
|
69 |
+
|
70 |
+
# Update memory
|
71 |
+
await self._update_memory(query, results, context)
|
72 |
+
|
73 |
+
# Generate analysis
|
74 |
+
analysis = await self._generate_analysis(results, context)
|
75 |
+
|
76 |
+
return {
|
77 |
+
'answer': self._format_analysis(analysis),
|
78 |
+
'confidence': self._calculate_confidence(results),
|
79 |
+
'similar_tasks': similar_tasks,
|
80 |
+
'adapted_params': adapted_params,
|
81 |
+
'results': results,
|
82 |
+
'analysis': analysis
|
83 |
+
}
|
84 |
+
|
85 |
+
except Exception as e:
|
86 |
+
logging.error(f"Meta-learning failed: {str(e)}")
|
87 |
+
return {
|
88 |
+
'error': f"Meta-learning failed: {str(e)}",
|
89 |
+
'confidence': 0.0
|
90 |
+
}
|
91 |
+
|
92 |
+
async def _find_similar_tasks(
|
93 |
+
self,
|
94 |
+
query: str,
|
95 |
+
context: Dict[str, Any]
|
96 |
+
) -> List[MetaTask]:
|
97 |
+
"""Find similar tasks in memory."""
|
98 |
+
similar_tasks = []
|
99 |
+
|
100 |
+
# Extract query features
|
101 |
+
query_features = self._extract_features(query)
|
102 |
+
|
103 |
+
for task in self.task_memory:
|
104 |
+
# Calculate similarity
|
105 |
+
similarity = self._calculate_similarity(
|
106 |
+
query_features,
|
107 |
+
self._extract_features(task.name)
|
108 |
+
)
|
109 |
+
|
110 |
+
if similarity > self.adaptation_threshold:
|
111 |
+
similar_tasks.append(task)
|
112 |
+
|
113 |
+
# Sort by similarity
|
114 |
+
similar_tasks.sort(
|
115 |
+
key=lambda x: np.mean(list(x.metrics.values())),
|
116 |
+
reverse=True
|
117 |
+
)
|
118 |
+
|
119 |
+
return similar_tasks
|
120 |
+
|
121 |
+
def _extract_features(self, text: str) -> np.ndarray:
|
122 |
+
"""Extract features from text."""
|
123 |
+
# Simple bag of words for now
|
124 |
+
words = set(text.lower().split())
|
125 |
+
return np.array([hash(word) % 100 for word in words])
|
126 |
+
|
127 |
+
def _calculate_similarity(
|
128 |
+
self,
|
129 |
+
features1: np.ndarray,
|
130 |
+
features2: np.ndarray
|
131 |
+
) -> float:
|
132 |
+
"""Calculate similarity between feature sets."""
|
133 |
+
# Simple Jaccard similarity
|
134 |
+
intersection = np.intersect1d(features1, features2)
|
135 |
+
union = np.union1d(features1, features2)
|
136 |
+
|
137 |
+
return len(intersection) / len(union) if len(union) > 0 else 0
|
138 |
+
|
139 |
+
async def _adapt_parameters(
|
140 |
+
self,
|
141 |
+
similar_tasks: List[MetaTask],
|
142 |
+
context: Dict[str, Any]
|
143 |
+
) -> Dict[str, Any]:
|
144 |
+
"""Adapt parameters based on similar tasks."""
|
145 |
+
if not similar_tasks:
|
146 |
+
return self.config.copy()
|
147 |
+
|
148 |
+
adapted_params = {}
|
149 |
+
|
150 |
+
# Weight tasks by performance
|
151 |
+
total_performance = sum(
|
152 |
+
np.mean(list(task.metrics.values()))
|
153 |
+
for task in similar_tasks
|
154 |
+
)
|
155 |
+
|
156 |
+
if total_performance > 0:
|
157 |
+
# Weighted average of parameters
|
158 |
+
for param_name in self.config:
|
159 |
+
adapted_params[param_name] = sum(
|
160 |
+
task.parameters.get(param_name, self.config[param_name]) *
|
161 |
+
(np.mean(list(task.metrics.values())) / total_performance)
|
162 |
+
for task in similar_tasks
|
163 |
+
)
|
164 |
+
else:
|
165 |
+
adapted_params = self.config.copy()
|
166 |
+
|
167 |
+
return adapted_params
|
168 |
+
|
169 |
+
async def _apply_meta_learning(
|
170 |
+
self,
|
171 |
+
query: str,
|
172 |
+
parameters: Dict[str, Any],
|
173 |
+
context: Dict[str, Any]
|
174 |
+
) -> Dict[str, Any]:
|
175 |
+
"""Apply meta-learning with adapted parameters."""
|
176 |
+
results = {
|
177 |
+
'query': query,
|
178 |
+
'parameters': parameters,
|
179 |
+
'metrics': {}
|
180 |
+
}
|
181 |
+
|
182 |
+
# Apply learning rate
|
183 |
+
for param_name, value in parameters.items():
|
184 |
+
if isinstance(value, (int, float)):
|
185 |
+
results['parameters'][param_name] = (
|
186 |
+
value * (1 - self.learning_rate) +
|
187 |
+
self.config[param_name] * self.learning_rate
|
188 |
+
)
|
189 |
+
|
190 |
+
# Calculate performance metrics
|
191 |
+
results['metrics'] = {
|
192 |
+
'adaptation_score': np.mean([
|
193 |
+
p / self.config[name]
|
194 |
+
for name, p in results['parameters'].items()
|
195 |
+
if isinstance(p, (int, float)) and self.config[name] != 0
|
196 |
+
]),
|
197 |
+
'novelty_score': 1 - max(
|
198 |
+
self._calculate_similarity(
|
199 |
+
self._extract_features(query),
|
200 |
+
self._extract_features(task.name)
|
201 |
+
)
|
202 |
+
for task in self.task_memory
|
203 |
+
) if self.task_memory else 1.0
|
204 |
+
}
|
205 |
+
|
206 |
+
return results
|
207 |
+
|
208 |
+
async def _update_memory(
|
209 |
+
self,
|
210 |
+
query: str,
|
211 |
+
results: Dict[str, Any],
|
212 |
+
context: Dict[str, Any]
|
213 |
+
) -> None:
|
214 |
+
"""Update task memory."""
|
215 |
+
# Create new task
|
216 |
+
task = MetaTask(
|
217 |
+
name=query,
|
218 |
+
parameters=results['parameters'],
|
219 |
+
metrics=results['metrics'],
|
220 |
+
history=[{
|
221 |
+
'timestamp': datetime.now().isoformat(),
|
222 |
+
'context': context,
|
223 |
+
'results': results
|
224 |
+
}]
|
225 |
+
)
|
226 |
+
|
227 |
+
# Add to memory
|
228 |
+
self.task_memory.append(task)
|
229 |
+
|
230 |
+
# Maintain memory size
|
231 |
+
if len(self.task_memory) > self.memory_size:
|
232 |
+
# Remove worst performing task
|
233 |
+
self.task_memory.sort(
|
234 |
+
key=lambda x: np.mean(list(x.metrics.values()))
|
235 |
+
)
|
236 |
+
self.task_memory.pop(0)
|
237 |
+
|
238 |
+
async def _generate_analysis(
|
239 |
+
self,
|
240 |
+
results: Dict[str, Any],
|
241 |
+
context: Dict[str, Any]
|
242 |
+
) -> Dict[str, Any]:
|
243 |
+
"""Generate meta-learning analysis."""
|
244 |
+
# Calculate statistics
|
245 |
+
param_stats = {
|
246 |
+
name: {
|
247 |
+
'value': value,
|
248 |
+
'adaptation': value / self.config[name]
|
249 |
+
if isinstance(value, (int, float)) and self.config[name] != 0
|
250 |
+
else 1.0
|
251 |
+
}
|
252 |
+
for name, value in results['parameters'].items()
|
253 |
+
}
|
254 |
+
|
255 |
+
# Calculate overall metrics
|
256 |
+
metrics = {
|
257 |
+
'adaptation_score': results['metrics']['adaptation_score'],
|
258 |
+
'novelty_score': results['metrics']['novelty_score'],
|
259 |
+
'memory_usage': len(self.task_memory) / self.memory_size
|
260 |
+
}
|
261 |
+
|
262 |
+
return {
|
263 |
+
'parameters': param_stats,
|
264 |
+
'metrics': metrics,
|
265 |
+
'memory_size': len(self.task_memory),
|
266 |
+
'total_tasks_seen': len(self.task_memory)
|
267 |
+
}
|
268 |
+
|
269 |
+
def _format_analysis(self, analysis: Dict[str, Any]) -> str:
|
270 |
+
"""Format analysis into readable text."""
|
271 |
+
sections = []
|
272 |
+
|
273 |
+
# Parameter adaptations
|
274 |
+
sections.append("Parameter adaptations:")
|
275 |
+
for name, stats in analysis['parameters'].items():
|
276 |
+
sections.append(
|
277 |
+
f"- {name}: {stats['value']:.2f} "
|
278 |
+
f"({stats['adaptation']:.1%} of original)"
|
279 |
+
)
|
280 |
+
|
281 |
+
# Performance metrics
|
282 |
+
sections.append("\nPerformance metrics:")
|
283 |
+
metrics = analysis['metrics']
|
284 |
+
sections.append(f"- Adaptation score: {metrics['adaptation_score']:.1%}")
|
285 |
+
sections.append(f"- Novelty score: {metrics['novelty_score']:.1%}")
|
286 |
+
sections.append(f"- Memory usage: {metrics['memory_usage']:.1%}")
|
287 |
+
|
288 |
+
# Memory statistics
|
289 |
+
sections.append("\nMemory statistics:")
|
290 |
+
sections.append(f"- Current tasks in memory: {analysis['memory_size']}")
|
291 |
+
sections.append(f"- Total tasks seen: {analysis['total_tasks_seen']}")
|
292 |
+
|
293 |
+
return "\n".join(sections)
|
294 |
+
|
295 |
+
def _calculate_confidence(self, results: Dict[str, Any]) -> float:
|
296 |
+
"""Calculate overall confidence score."""
|
297 |
+
if not results.get('metrics'):
|
298 |
+
return 0.0
|
299 |
+
|
300 |
+
# Base confidence
|
301 |
+
confidence = 0.5
|
302 |
+
|
303 |
+
# Adjust based on adaptation score
|
304 |
+
adaptation_score = results['metrics']['adaptation_score']
|
305 |
+
if adaptation_score > 0.8:
|
306 |
+
confidence += 0.3
|
307 |
+
elif adaptation_score > 0.6:
|
308 |
+
confidence += 0.2
|
309 |
+
elif adaptation_score > 0.4:
|
310 |
+
confidence += 0.1
|
311 |
+
|
312 |
+
# Adjust based on novelty
|
313 |
+
novelty_score = results['metrics']['novelty_score']
|
314 |
+
if novelty_score < 0.2: # Very similar to known tasks
|
315 |
+
confidence += 0.2
|
316 |
+
elif novelty_score < 0.4:
|
317 |
+
confidence += 0.1
|
318 |
+
|
319 |
+
return min(confidence, 1.0)
|
320 |
+
|
321 |
+
def get_performance_metrics(self) -> Dict[str, Any]:
|
322 |
+
"""Get current performance metrics."""
|
323 |
+
return {
|
324 |
+
"success_rate": 0.0,
|
325 |
+
"adaptation_rate": 0.0,
|
326 |
+
"exploration_count": 0,
|
327 |
+
"episode_count": len(self.task_memory),
|
328 |
+
"pattern_count": 0,
|
329 |
+
"learning_rate": self.learning_rate,
|
330 |
+
"exploration_rate": 0.0
|
331 |
+
}
|
332 |
+
|
333 |
+
    def get_top_patterns(self, n: int = 10) -> List[Tuple[str, float]]:
        """Get top performing patterns.

        Placeholder: pattern tracking is not implemented yet, so this
        always returns an empty list regardless of ``n``.
        """
        return []
|
336 |
+
|
337 |
+
    def clear_memory(self):
        """Clear learning memory.

        Empties ``task_memory`` in place; other state (config, learning
        rate) is left untouched.
        """
        self.task_memory.clear()
|
reasoning/model_manager.py
ADDED
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Model manager for handling multiple LLMs in Hugging Face Spaces."""
|
2 |
+
|
3 |
+
import os
|
4 |
+
from typing import Dict, Any, Optional, List
|
5 |
+
import logging
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from enum import Enum
|
8 |
+
import huggingface_hub
|
9 |
+
from llama_cpp import Llama
|
10 |
+
|
11 |
+
class ModelType(Enum):
    """Types of models and their specific tasks."""
    REASONING = "reasoning"  # general logical reasoning
    CODE = "code"            # code generation / completion
    CHAT = "chat"            # conversational responses
    PLANNING = "planning"    # task decomposition and planning
    ANALYSIS = "analysis"    # long-context analysis
|
18 |
+
|
19 |
+
@dataclass
class ModelConfig:
    """Configuration for a specific model loaded via llama.cpp."""
    repo_id: str              # Hugging Face Hub repository id
    filename: str             # GGUF file name inside the repo
    model_type: ModelType     # task family this model serves
    context_size: int = 4096  # context window in tokens
    gpu_layers: int = 35      # layers offloaded to GPU (0 = CPU only)
    batch_size: int = 512     # prompt-processing batch size
    threads: int = 8          # CPU threads for inference
|
29 |
+
|
30 |
+
class ModelManager:
    """Manages multiple LLM models for different tasks in Spaces.

    Models are downloaded from the Hugging Face Hub on first use and
    loaded with llama.cpp, preferring GPU offload and falling back to a
    conservative CPU-only configuration.
    """

    def __init__(self):
        # In Spaces, models are stored in the cache directory
        self.model_dir = os.getenv('SPACE_CACHE_DIR', '/tmp/models')
        self.models: Dict[str, Llama] = {}
        self.logger = logging.getLogger(__name__)

        # Define model configurations.
        # NOTE(review): repo ids/filenames are assumed to exist on the Hub
        # (e.g. "Nidum-Llama-3.2-3B-Uncensored-GGUF" has no namespace) —
        # verify before deployment.
        self.model_configs = {
            "reasoning": ModelConfig(
                repo_id="rrbale/pruned-qwen-moe",
                filename="model-Q6_K.gguf",
                model_type=ModelType.REASONING
            ),
            "code": ModelConfig(
                repo_id="YorkieOH10/deepseek-coder-6.7B-kexer-Q8_0-GGUF",
                filename="model.gguf",
                model_type=ModelType.CODE
            ),
            "chat": ModelConfig(
                repo_id="Nidum-Llama-3.2-3B-Uncensored-GGUF",
                filename="model-Q6_K.gguf",
                model_type=ModelType.CHAT
            ),
            "planning": ModelConfig(
                repo_id="deepseek-ai/JanusFlow-1.3B",
                filename="model.gguf",
                model_type=ModelType.PLANNING
            ),
            "analysis": ModelConfig(
                repo_id="prithivMLmods/QwQ-4B-Instruct",
                filename="model.gguf",
                model_type=ModelType.ANALYSIS,
                context_size=8192,
                gpu_layers=40
            ),
            "general": ModelConfig(
                repo_id="gpt-omni/mini-omni2",
                filename="mini-omni2.gguf",
                model_type=ModelType.CHAT
            )
        }

        os.makedirs(self.model_dir, exist_ok=True)

    async def initialize_model(self, model_key: str) -> Optional[Llama]:
        """Download and load a specific model; returns None on failure.

        Args:
            model_key: One of the keys in ``self.model_configs``.
        """
        # BUG FIX: an unknown key previously raised KeyError inside the
        # try block and was logged as a generic initialization error.
        if model_key not in self.model_configs:
            self.logger.error(f"Unknown model key: {model_key}")
            return None

        try:
            config = self.model_configs[model_key]
            cache_dir = os.path.join(self.model_dir, model_key)
            os.makedirs(cache_dir, exist_ok=True)

            # Download model using HF Hub (no-op if already cached).
            self.logger.info(f"Downloading {model_key} model...")
            model_path = huggingface_hub.hf_hub_download(
                repo_id=config.repo_id,
                filename=config.filename,
                repo_type="model",
                cache_dir=cache_dir,
                local_dir_use_symlinks=False  # deprecated upstream; kept for compatibility
            )

            # Try the GPU configuration first, then fall back to CPU-only
            # with reduced context/batch on any loading failure.
            try:
                model = Llama(
                    model_path=model_path,
                    n_ctx=config.context_size,
                    n_batch=config.batch_size,
                    n_threads=config.threads,
                    n_gpu_layers=config.gpu_layers,
                    main_gpu=0,
                    tensor_split=None  # Let it use all available GPU memory
                )
                self.logger.info(f"{model_key} model loaded with GPU acceleration!")
            except Exception as e:
                self.logger.warning(f"GPU loading failed for {model_key}: {e}, falling back to CPU...")
                model = Llama(
                    model_path=model_path,
                    n_ctx=2048,
                    n_batch=256,
                    n_threads=4,
                    n_gpu_layers=0
                )
                self.logger.info(f"{model_key} model loaded in CPU-only mode")

            self.models[model_key] = model
            return model

        except Exception as e:
            self.logger.error(f"Error initializing {model_key} model: {e}")
            return None

    async def get_model(self, model_key: str) -> Optional[Llama]:
        """Get a cached model, initializing it on first request."""
        if model_key not in self.models:
            return await self.initialize_model(model_key)
        return self.models[model_key]

    async def initialize_all_models(self):
        """Eagerly initialize every configured model."""
        for model_key in self.model_configs:  # .keys() was redundant
            await self.initialize_model(model_key)

    def get_best_model_for_task(self, task_type: str) -> str:
        """Map a task type to a model key, defaulting to "general".

        The previous hand-written mapping was the identity over the
        configured keys, so membership validation is equivalent.
        """
        return task_type if task_type in self.model_configs else "general"
|
reasoning/monetization.py
ADDED
@@ -0,0 +1,447 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced monetization strategies for venture optimization."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
class MonetizationModel:
    """Monetization model configuration."""
    name: str                            # human-readable model name
    type: str                            # e.g. subscription, usage-based, hybrid
    pricing_tiers: List[Dict[str, Any]]  # tier dicts; may carry "price"/"volume"
    features: List[str]                  # features included by this model
    constraints: List[str]               # limits/conditions attached to the model
    metrics: Dict[str, float]            # e.g. "growth_rate", "churn_rate" (percent)
|
23 |
+
|
24 |
+
@dataclass
class RevenueStream:
    """Revenue stream configuration."""
    name: str                          # stream identifier
    type: str                          # stream category
    volume: float                      # customer/unit volume
    unit_economics: Dict[str, float]   # e.g. "arpu" per unit per month
    growth_rate: float                 # growth rate (percent)
    churn_rate: float                  # monthly churn (percent)
|
33 |
+
|
34 |
+
class MonetizationOptimizer:
    """
    Advanced monetization optimization that:
    1. Designs pricing models
    2. Optimizes revenue streams
    3. Maximizes customer value
    4. Reduces churn
    5. Increases lifetime value

    Each stage prompts the LLM supplied via ``context["groq_api"]`` and
    parses its sectioned text response.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize monetization optimizer.

        Args:
            config: Optional overrides for the optimization thresholds.
        """
        self.config = config or {}

        # Configure optimization parameters
        self.min_revenue = self.config.get('min_revenue', 1_000_000)  # annual revenue target ($)
        self.min_margin = self.config.get('min_margin', 0.3)          # minimum acceptable margin
        self.max_churn = self.config.get('max_churn', 0.1)            # maximum acceptable churn
        self.target_ltv = self.config.get('target_ltv', 1000)         # target lifetime value ($)

        # Registries consumed by get_monetization_metrics(); populated externally.
        self.models: Dict[str, MonetizationModel] = {}
        self.streams: Dict[str, RevenueStream] = {}

    async def optimize_monetization(self,
                                    venture_type: str,
                                    context: Dict[str, Any]) -> Dict[str, Any]:
        """Run the full optimization pipeline for a venture type.

        Returns:
            Dict with 'success' plus every stage's output, or
            {'success': False, 'error': ...} if any stage fails.
        """
        try:
            # Design models
            models = await self._design_models(venture_type, context)

            # Optimize pricing
            pricing = await self._optimize_pricing(models, context)

            # Revenue optimization
            revenue = await self._optimize_revenue(pricing, context)

            # Value optimization
            value = await self._optimize_value(revenue, context)

            # Performance projections
            projections = await self._project_performance(value, context)

            return {
                # BUG FIX: compare against the configured revenue target
                # (self.min_revenue) rather than a hard-coded 1_000_000.
                "success": projections["annual_revenue"] >= self.min_revenue,
                "models": models,
                "pricing": pricing,
                "revenue": revenue,
                "value": value,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in monetization optimization: {str(e)}")
            return {"success": False, "error": str(e)}

    async def _design_models(self,
                             venture_type: str,
                             context: Dict[str, Any]) -> Dict[str, Any]:
        """Ask the LLM to design candidate monetization models."""
        prompt = f"""
        Design monetization models:
        Venture: {venture_type}
        Context: {json.dumps(context)}

        Design models for:
        1. Subscription tiers
        2. Usage-based pricing
        3. Hybrid models
        4. Enterprise pricing
        5. Marketplace fees

        Format as:
        [Model1]
        Name: ...
        Type: ...
        Tiers: ...
        Features: ...
        Constraints: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_model_design(response["answer"])

    async def _optimize_pricing(self,
                                models: Dict[str, Any],
                                context: Dict[str, Any]) -> Dict[str, Any]:
        """Ask the LLM to optimize the pricing strategy."""
        prompt = f"""
        Optimize pricing strategy:
        Models: {json.dumps(models)}
        Context: {json.dumps(context)}

        Optimize for:
        1. Market positioning
        2. Value perception
        3. Competitive dynamics
        4. Customer segments
        5. Growth potential

        Format as:
        [Strategy1]
        Model: ...
        Positioning: ...
        Value_Props: ...
        Segments: ...
        Growth: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_pricing_strategy(response["answer"])

    async def _optimize_revenue(self,
                                pricing: Dict[str, Any],
                                context: Dict[str, Any]) -> Dict[str, Any]:
        """Ask the LLM to optimize the revenue streams."""
        prompt = f"""
        Optimize revenue streams:
        Pricing: {json.dumps(pricing)}
        Context: {json.dumps(context)}

        Optimize for:
        1. Revenue mix
        2. Growth drivers
        3. Retention factors
        4. Expansion potential
        5. Risk mitigation

        Format as:
        [Stream1]
        Type: ...
        Drivers: ...
        Retention: ...
        Expansion: ...
        Risks: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_revenue_optimization(response["answer"])

    async def _optimize_value(self,
                              revenue: Dict[str, Any],
                              context: Dict[str, Any]) -> Dict[str, Any]:
        """Ask the LLM to optimize customer value."""
        prompt = f"""
        Optimize customer value:
        Revenue: {json.dumps(revenue)}
        Context: {json.dumps(context)}

        Optimize for:
        1. Acquisition cost
        2. Lifetime value
        3. Churn reduction
        4. Upsell potential
        5. Network effects

        Format as:
        [Value1]
        Metric: ...
        Strategy: ...
        Potential: ...
        Actions: ...
        Timeline: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_value_optimization(response["answer"])

    async def _project_performance(self,
                                   value: Dict[str, Any],
                                   context: Dict[str, Any]) -> Dict[str, Any]:
        """Ask the LLM to project monetization performance."""
        prompt = f"""
        Project performance:
        Value: {json.dumps(value)}
        Context: {json.dumps(context)}

        Project:
        1. Revenue growth
        2. Customer metrics
        3. Unit economics
        4. Profitability
        5. Scale effects

        Format as:
        [Projections]
        Revenue: ...
        Metrics: ...
        Economics: ...
        Profit: ...
        Scale: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_performance_projections(response["answer"])

    # ------------------------------------------------------------------
    # Response parsers.
    # BUG FIX: these _parse_* methods were referenced above but never
    # defined, so every pipeline run raised AttributeError and returned
    # the error dict. They now share a simple sectioned-text parser.
    # ------------------------------------------------------------------

    def _parse_sections(self, text: str) -> Dict[str, Any]:
        """Parse '[Section]' blocks of 'Key: value' lines into nested dicts."""
        sections: Dict[str, Any] = {}
        current: Optional[Dict[str, Any]] = None
        for raw_line in text.splitlines():
            line = raw_line.strip()
            if not line:
                continue
            if line.startswith('[') and line.endswith(']'):
                current = {}
                sections[line[1:-1]] = current
            elif ':' in line and current is not None:
                key, _, value = line.partition(':')
                current[key.strip().lower()] = value.strip()
        return sections

    def _parse_model_design(self, response: str) -> Dict[str, Any]:
        """Parse the model-design response into per-model dicts."""
        return self._parse_sections(response)

    def _parse_pricing_strategy(self, response: str) -> Dict[str, Any]:
        """Parse the pricing-strategy response into per-strategy dicts."""
        return self._parse_sections(response)

    def _parse_revenue_optimization(self, response: str) -> Dict[str, Any]:
        """Parse the revenue-optimization response into per-stream dicts."""
        return self._parse_sections(response)

    def _parse_value_optimization(self, response: str) -> Dict[str, Any]:
        """Parse the value-optimization response into per-metric dicts."""
        return self._parse_sections(response)

    def _parse_performance_projections(self, response: str) -> Dict[str, Any]:
        """Parse projections and derive a numeric 'annual_revenue'.

        The success check in optimize_monetization() needs a numeric
        'annual_revenue'; we extract a best-effort figure from the
        Projections "Revenue" field, defaulting to 0.0 when unparseable.
        """
        import re  # local import: only needed for this best-effort extraction

        parsed = self._parse_sections(response)
        projections = parsed.get('Projections', {})
        revenue_text = str(projections.get('revenue', ''))
        digits = re.sub(r'[^0-9.]', '', revenue_text)
        try:
            annual_revenue = float(digits) if digits else 0.0
        except ValueError:
            annual_revenue = 0.0
        return {**parsed, 'annual_revenue': annual_revenue}

    def _calculate_revenue_potential(self, model: MonetizationModel) -> float:
        """Estimate revenue potential from tier price*volume, growth and churn."""
        base_potential = sum(
            tier.get("price", 0) * tier.get("volume", 0)
            for tier in model.pricing_tiers
        )

        # Metrics are stored as percentages.
        growth_factor = 1.0 + (model.metrics.get("growth_rate", 0) / 100)
        retention_factor = 1.0 - (model.metrics.get("churn_rate", 0) / 100)

        return base_potential * growth_factor * retention_factor

    def _calculate_customer_ltv(self, stream: RevenueStream) -> float:
        """Calculate customer lifetime value for a revenue stream."""
        monthly_revenue = stream.volume * stream.unit_economics.get("arpu", 0)
        churn_rate = stream.churn_rate / 100
        discount_rate = 0.1  # 10% annual discount rate

        if churn_rate > 0:
            # Classic LTV approximation: monthly revenue / churn.
            ltv = monthly_revenue / churn_rate
        else:
            ltv = monthly_revenue * 12  # Assume 1 year if no churn

        # NOTE(review): a single-period discount, not a full NPV — confirm
        # this simplification is intended.
        return ltv / (1 + discount_rate)

    def get_monetization_metrics(self) -> Dict[str, Any]:
        """Get comprehensive per-model, per-stream, and aggregate metrics."""
        total_volume = sum(stream.volume for stream in self.streams.values())
        return {
            "model_metrics": {
                model.name: {
                    "revenue_potential": self._calculate_revenue_potential(model),
                    "tier_count": len(model.pricing_tiers),
                    "feature_count": len(model.features),
                    "constraint_count": len(model.constraints)
                }
                for model in self.models.values()
            },
            "stream_metrics": {
                stream.name: {
                    "monthly_revenue": stream.volume * stream.unit_economics.get("arpu", 0),
                    "ltv": self._calculate_customer_ltv(stream),
                    "growth_rate": stream.growth_rate,
                    "churn_rate": stream.churn_rate
                }
                for stream in self.streams.values()
            },
            "aggregate_metrics": {
                "total_revenue_potential": sum(
                    self._calculate_revenue_potential(model)
                    for model in self.models.values()
                ),
                "average_ltv": np.mean([
                    self._calculate_customer_ltv(stream)
                    for stream in self.streams.values()
                ]) if self.streams else 0,
                # BUG FIX: np.average raises ZeroDivisionError when every
                # weight is zero — also require a positive total volume.
                "weighted_growth_rate": np.average(
                    [stream.growth_rate for stream in self.streams.values()],
                    weights=[stream.volume for stream in self.streams.values()]
                ) if self.streams and total_volume > 0 else 0
            }
        }
|
290 |
+
|
291 |
+
class MonetizationStrategy(ReasoningStrategy):
    """
    Advanced monetization strategy that:
    1. Designs optimal pricing models
    2. Optimizes revenue streams
    3. Maximizes customer lifetime value
    4. Reduces churn
    5. Increases profitability
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize monetization strategy.

        Args:
            config: Optional settings; shared thresholds are forwarded to
                the underlying MonetizationOptimizer.
        """
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Initialize optimizer with shared config
        optimizer_config = {
            'min_revenue': self.config.get('min_revenue', 1_000_000),
            'min_margin': self.config.get('min_margin', 0.3),
            'max_churn': self.config.get('max_churn', 0.1),
            'target_ltv': self.config.get('target_ltv', 1000)
        }
        self.optimizer = MonetizationOptimizer(optimizer_config)

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate monetization strategy based on query and context.

        Args:
            query: The monetization query
            context: Additional context and parameters

        Returns:
            Dict containing monetization strategy and confidence scores
        """
        try:
            # Extract venture type
            venture_type = self._extract_venture_type(query, context)

            # Optimize monetization
            optimization_result = await self.optimizer.optimize_monetization(
                venture_type=venture_type,
                context=context
            )

            # Format results
            formatted_result = self._format_strategy(optimization_result)

            return {
                'answer': formatted_result,
                'confidence': self._calculate_confidence(optimization_result),
                'optimization': optimization_result
            }

        except Exception as e:
            logging.error(f"Monetization strategy generation failed: {str(e)}")
            return {
                'error': f"Monetization strategy generation failed: {str(e)}",
                'confidence': 0.0
            }

    def _extract_venture_type(self, query: str, context: Dict[str, Any]) -> str:
        """Extract venture type from query and context.

        Prefers an explicit 'venture_type' in context, then falls back to
        keyword matching; defaults to 'saas'.
        """
        # Use context if available
        if 'venture_type' in context:
            return context['venture_type']

        # Simple keyword matching
        query_lower = query.lower()
        if any(term in query_lower for term in ['ai', 'ml', 'model']):
            return 'ai_startup'
        elif any(term in query_lower for term in ['saas', 'software']):
            return 'saas'
        elif any(term in query_lower for term in ['api', 'service']):
            return 'api_service'
        elif any(term in query_lower for term in ['data', 'analytics']):
            return 'data_analytics'

        # Default to SaaS if unclear
        return 'saas'

    def _calculate_confidence(self, result: Dict[str, Any]) -> float:
        """Calculate confidence score based on optimization quality."""
        # Base confidence
        confidence = 0.5

        # Adjust based on optimization completeness
        if result.get('models'):
            confidence += 0.1
        if result.get('pricing'):
            confidence += 0.1
        if result.get('revenue'):
            confidence += 0.1
        if result.get('value'):
            confidence += 0.1

        # BUG FIX: the optimizer returns its projections under the key
        # 'projections'; the old lookup of 'performance' never matched, so
        # these bonuses could not apply. Keep 'performance' as a fallback.
        performance = result.get('projections') or result.get('performance') or {}
        if performance.get('roi', 0) > 2.0:
            confidence += 0.1
        if performance.get('ltv', 0) > 1000:
            confidence += 0.1

        return min(confidence, 1.0)

    def _format_strategy(self, result: Dict[str, Any]) -> str:
        """Format monetization strategy into readable text."""
        sections = []

        # Monetization models
        if 'models' in result:
            models = result['models']
            sections.append("Monetization Models:")
            for model in models:
                sections.append(f"- {model['name']}: {model['type']}")
                if 'pricing_tiers' in model:
                    sections.append("  Pricing Tiers:")
                    for tier in model['pricing_tiers']:
                        sections.append(f"    * {tier['name']}: ${tier['price']}/mo")

        # Revenue optimization
        if 'revenue' in result:
            revenue = result['revenue']
            sections.append("\nRevenue Optimization:")
            for stream, details in revenue.items():
                sections.append(f"- {stream.replace('_', ' ').title()}:")
                sections.append(f"  * Projected Revenue: ${details['projected_revenue']:,.2f}")
                sections.append(f"  * Growth Rate: {details['growth_rate']*100:.1f}%")

        # Customer value optimization
        if 'value' in result:
            value = result['value']
            sections.append("\nCustomer Value Optimization:")
            sections.append(f"- Customer Acquisition Cost: ${value['cac']:,.2f}")
            sections.append(f"- Lifetime Value: ${value['ltv']:,.2f}")
            sections.append(f"- Churn Rate: {value['churn_rate']*100:.1f}%")

        # Performance projections
        # BUG FIX: read 'projections' (what the optimizer emits), with a
        # legacy fallback to 'performance'; use .get() so partially
        # populated projections do not crash formatting.
        perf = result.get('projections') or result.get('performance')
        if perf:
            sections.append("\nPerformance Projections:")
            sections.append(f"- ROI: {perf.get('roi', 0)*100:.1f}%")
            sections.append(f"- Payback Period: {perf.get('payback_months', 0.0):.1f} months")
            sections.append(f"- Break-even Point: ${perf.get('breakeven', 0.0):,.2f}")

        return "\n".join(sections)
|
reasoning/multimodal.py
ADDED
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced multimodal reasoning combining different types of information."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
class ModalityFeatures:
    """Features extracted from different modalities."""
    text: List[Dict[str, Any]]                         # text features (always present)
    image: Optional[List[Dict[str, Any]]] = None       # image features, if any
    audio: Optional[List[Dict[str, Any]]] = None       # audio features, if any
    video: Optional[List[Dict[str, Any]]] = None       # video features, if any
    structured: Optional[List[Dict[str, Any]]] = None  # structured-data features, if any
|
22 |
+
|
23 |
+
class MultiModalReasoning(ReasoningStrategy):
    """
    Advanced multimodal reasoning that:
    1. Processes different types of information
    2. Aligns cross-modal features
    3. Integrates multimodal context
    4. Generates coherent responses
    5. Handles uncertainty
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize multimodal reasoning.

        Args:
            config: Optional overrides for the defaults configured below.
        """
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters (shared across strategies).
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Model repositories. NOTE(review): configuration only — nothing in
        # this class loads or invokes these models yet.
        self.models = self.config.get('models', {
            'img2img': {
                'repo_id': 'enhanceaiteam/Flux-Uncensored-V2',
                'filename': 'Flux-Uncensored-V2.safetensors'
            },
            'img2vid': {
                'repo_id': 'stabilityai/stable-video-diffusion-img2vid-xt',
                'filename': 'svd_xt.safetensors'
            },
            'any2any': {
                'repo_id': 'deepseek-ai/JanusFlow-1.3B',
                'filename': 'janusflow-1.3b.safetensors'
            }
        })

        # Relative modality weights. NOTE(review): currently unused by the
        # similarity/confidence math below — confirm before relying on them.
        self.weights = self.config.get('modality_weights', {
            'text': 0.4,
            'image': 0.3,
            'audio': 0.1,
            'video': 0.1,
            'structured': 0.1
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Apply multimodal reasoning to process and integrate different types of information.

        Args:
            query: The input query to reason about
            context: Additional context and parameters (may carry 'text',
                'images', 'audio', 'video', 'structured' payloads)

        Returns:
            Dict containing reasoning results and confidence scores; on
            failure a dict with 'error' and zero 'confidence'.
        """
        try:
            # Process across modalities
            modalities = await self._process_modalities(query, context)

            # Align cross-modal information
            alignment = await self._cross_modal_alignment(modalities, context)

            # Integrate aligned information
            integration = await self._integrated_analysis(alignment, context)

            # Generate final response
            response = await self._generate_response(integration, context)

            return {
                'answer': response.get('text', ''),
                'confidence': self._calculate_confidence(integration),
                'modalities': modalities,
                'alignment': alignment,
                'integration': integration
            }

        except Exception as e:
            logging.error(f"Multimodal reasoning failed: {str(e)}")
            return {
                'error': f"Multimodal reasoning failed: {str(e)}",
                'confidence': 0.0
            }

    async def _process_modalities(
        self,
        query: str,
        context: Dict[str, Any]
    ) -> Dict[str, List[Dict[str, Any]]]:
        """Process query and context across the available modalities.

        Fix: previously the ``query`` argument was ignored entirely, so a
        call with no ``context['text']`` produced no text modality at all.
        The query is now used as a text fallback in that case (behavior is
        unchanged when callers supply explicit text context).
        """
        modalities = {}

        # Process text; fall back to the raw query when no explicit text
        # context is supplied so the query always contributes.
        if 'text' in context:
            modalities['text'] = self._process_text(context['text'])
        elif query:
            modalities['text'] = self._process_text(query)

        # Process images
        if 'images' in context:
            modalities['image'] = self._process_images(context['images'])

        # Process audio
        if 'audio' in context:
            modalities['audio'] = self._process_audio(context['audio'])

        # Process video
        if 'video' in context:
            modalities['video'] = self._process_video(context['video'])

        # Process structured data
        if 'structured' in context:
            modalities['structured'] = self._process_structured(context['structured'])

        return modalities

    async def _cross_modal_alignment(
        self,
        modalities: Dict[str, List[Dict[str, Any]]],
        context: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """Align information across different modalities.

        Compares every item of every modality pair and records pairs whose
        feature-overlap similarity exceeds the alignment threshold.
        """
        alignments = []

        # Enumerate all unordered modality pairs (dict order is insertion
        # order, so this is deterministic).
        modality_pairs = [
            (m1, m2) for i, m1 in enumerate(modalities.keys())
            for m2 in list(modalities.keys())[i+1:]
        ]

        # Align each pair
        for mod1, mod2 in modality_pairs:
            items1 = modalities[mod1]
            items2 = modalities[mod2]

            # Calculate cross-modal similarities
            for item1 in items1:
                for item2 in items2:
                    similarity = self._calculate_similarity(item1, item2)
                    if similarity > 0.7:  # Alignment threshold
                        alignments.append({
                            'modality1': mod1,
                            'modality2': mod2,
                            'item1': item1,
                            'item2': item2,
                            'similarity': similarity
                        })

        return alignments

    def _calculate_similarity(
        self,
        item1: Dict[str, Any],
        item2: Dict[str, Any]
    ) -> float:
        """Calculate similarity between two items from different modalities.

        Uses Jaccard overlap of the items' stringified values — a coarse
        placeholder for true cross-modal embedding similarity.
        """
        features1 = set(str(v) for v in item1.values())
        features2 = set(str(v) for v in item2.values())

        if not features1 or not features2:
            return 0.0

        overlap = len(features1.intersection(features2))
        total = len(features1.union(features2))

        return overlap / total if total > 0 else 0.0

    async def _integrated_analysis(
        self,
        alignment: List[Dict[str, Any]],
        context: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """Perform integrated analysis of aligned information.

        Groups alignments by similarity and merges each aligned pair's
        features (item2 values win on key collisions — dict merge order).
        """
        integrated = []

        # Group alignments by similarity
        similarity_groups = defaultdict(list)
        for align in alignment:
            similarity_groups[align['similarity']].append(align)

        # Process groups in order of similarity (highest first)
        for similarity, group in sorted(
            similarity_groups.items(),
            key=lambda x: x[0],
            reverse=True
        ):
            # Combine aligned features
            for align in group:
                integrated.append({
                    'features': {
                        **align['item1'],
                        **align['item2']
                    },
                    'modalities': [align['modality1'], align['modality2']],
                    'confidence': align['similarity']
                })

        return integrated

    async def _generate_response(
        self,
        integration: List[Dict[str, Any]],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Generate a coherent text response from the integrated analysis."""
        if not integration:
            return {'text': '', 'confidence': 0.0}

        # Combine all integrated features
        all_features = {}
        for item in integration:
            all_features.update(item['features'])

        # Generate response text
        response_text = []

        # Add main findings
        response_text.append("Main findings across modalities:")
        for feature, value in all_features.items():
            response_text.append(f"- {feature}: {value}")

        # Add mean alignment confidence (len > 0 is guaranteed here)
        confidence = sum(item['confidence'] for item in integration) / len(integration)
        response_text.append(f"\nOverall confidence: {confidence:.2f}")

        return {
            'text': "\n".join(response_text),
            'confidence': confidence
        }

    def _calculate_confidence(self, integration: List[Dict[str, Any]]) -> float:
        """Calculate the overall confidence score in [0, 1].

        Base 0.5, plus up to 0.3 for modality coverage and up to 0.2 for
        average alignment quality.
        """
        if not integration:
            return 0.0

        # Base confidence
        confidence = 0.5

        # Adjust based on number of distinct modalities involved
        unique_modalities = set()
        for item in integration:
            unique_modalities.update(item['modalities'])

        modality_bonus = len(unique_modalities) * 0.1
        confidence += min(modality_bonus, 0.3)

        # Adjust based on integration quality
        avg_similarity = sum(
            item['confidence'] for item in integration
        ) / len(integration)
        confidence += avg_similarity * 0.2

        return min(confidence, 1.0)

    def _process_text(self, text: str) -> List[Dict[str, Any]]:
        """Process text modality (placeholder: wraps text verbatim)."""
        return [{'text': text}]

    def _process_images(self, images: List[str]) -> List[Dict[str, Any]]:
        """Process image modality (placeholder: wraps each image ref)."""
        return [{'image': image} for image in images]

    def _process_audio(self, audio: List[str]) -> List[Dict[str, Any]]:
        """Process audio modality (placeholder: wraps each audio ref)."""
        return [{'audio': audio_file} for audio_file in audio]

    def _process_video(self, video: List[str]) -> List[Dict[str, Any]]:
        """Process video modality (placeholder: wraps each video ref)."""
        return [{'video': video_file} for video_file in video]

    def _process_structured(self, structured: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Process structured data modality (placeholder: wraps the dict)."""
        return [{'structured': structured}]
|
reasoning/neurosymbolic.py
ADDED
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced neurosymbolic reasoning combining neural and symbolic approaches."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
class NeuralFeature:
    """Neural features extracted from data."""
    name: str  # source term/token this feature was extracted from
    values: np.ndarray  # numeric feature vector for the term
    importance: float  # mean absolute value of the vector; used for ranking
    metadata: Dict[str, Any] = field(default_factory=dict)  # provenance info (e.g. extraction source)
|
21 |
+
|
22 |
+
@dataclass
class SymbolicRule:
    """Symbolic rule with conditions and confidence."""
    name: str  # generated identifier, e.g. "rule_0"
    conditions: List[str]  # textual conditions such as "a AND b"
    conclusion: str  # rule outcome label
    confidence: float  # rule strength in [0, 1]
    metadata: Dict[str, Any] = field(default_factory=dict)  # e.g. contributing features, correlation
|
30 |
+
|
31 |
+
class NeurosymbolicReasoning(ReasoningStrategy):
    """
    Advanced neurosymbolic reasoning that:
    1. Extracts neural features
    2. Generates symbolic rules
    3. Combines approaches
    4. Handles uncertainty
    5. Provides interpretable results
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize neurosymbolic reasoning.

        Args:
            config: Optional overrides for the defaults configured below.
        """
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters (shared across strategies).
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Neurosymbolic specific parameters
        self.feature_threshold = self.config.get('feature_threshold', 0.1)
        self.rule_confidence_threshold = self.config.get('rule_confidence', 0.7)
        self.max_rules = self.config.get('max_rules', 10)

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Apply neurosymbolic reasoning to combine neural and symbolic approaches.

        Args:
            query: The input query to reason about
            context: Additional context and parameters

        Returns:
            Dict containing reasoning results and confidence scores; on
            failure a dict with 'error' and zero 'confidence'.
        """
        try:
            # Extract neural features
            features = await self._extract_features(query, context)

            # Generate symbolic rules
            rules = await self._generate_rules(features, context)

            # Combine approaches
            combined = await self._combine_approaches(features, rules, context)

            # Generate analysis
            analysis = await self._generate_analysis(combined, context)

            return {
                'answer': self._format_analysis(analysis),
                'confidence': self._calculate_confidence(combined),
                'features': features,
                'rules': rules,
                'combined': combined,
                'analysis': analysis
            }

        except Exception as e:
            logging.error(f"Neurosymbolic reasoning failed: {str(e)}")
            return {
                'error': f"Neurosymbolic reasoning failed: {str(e)}",
                'confidence': 0.0
            }

    async def _extract_features(
        self,
        query: str,
        context: Dict[str, Any]
    ) -> List[NeuralFeature]:
        """Extract neural features from the input query.

        Fix: the previous implementation used ``np.random.randn`` for the
        placeholder feature vectors, making every result non-deterministic
        and non-reproducible. Vectors are now derived deterministically
        from each term's UTF-8 bytes, so repeated calls with the same
        query yield identical features, rules, and confidence.
        """
        features = []

        # Extract key terms
        terms = query.lower().split()

        # Process each term
        for term in terms:
            # Deterministic placeholder embedding: recycle the term's byte
            # values into a fixed-length 10-dim vector, roughly centered.
            codes = np.frombuffer(term.encode('utf-8'), dtype=np.uint8)
            values = (np.resize(codes.astype(np.float64), 10) - 100.0) / 20.0
            importance = np.abs(values).mean()

            if importance > self.feature_threshold:
                features.append(NeuralFeature(
                    name=term,
                    values=values,
                    importance=importance,
                    metadata={'source': 'term_extraction'}
                ))

        # Sort by importance (most important first)
        features.sort(key=lambda x: x.importance, reverse=True)

        return features

    async def _generate_rules(
        self,
        features: List[NeuralFeature],
        context: Dict[str, Any]
    ) -> List[SymbolicRule]:
        """Generate symbolic rules from pairwise feature correlations.

        Stops once ``self.max_rules`` rules have been produced.
        """
        rules = []

        # Process feature combinations (unordered pairs)
        for i, feature1 in enumerate(features):
            for feature2 in features[i+1:]:
                # Pearson correlation of the two feature vectors
                correlation = np.corrcoef(feature1.values, feature2.values)[0, 1]

                # Guard: zero-variance vectors make corrcoef return NaN,
                # which would silently fail the threshold test anyway but
                # is clearer to skip explicitly.
                if not np.isfinite(correlation):
                    continue

                if abs(correlation) > self.rule_confidence_threshold:
                    # Create rule based on correlation sign
                    if correlation > 0:
                        condition = f"{feature1.name} AND {feature2.name}"
                        conclusion = "positively_correlated"
                    else:
                        condition = f"{feature1.name} XOR {feature2.name}"
                        conclusion = "negatively_correlated"

                    rules.append(SymbolicRule(
                        name=f"rule_{len(rules)}",
                        conditions=[condition],
                        conclusion=conclusion,
                        confidence=abs(correlation),
                        metadata={
                            'features': [feature1.name, feature2.name],
                            'correlation': correlation
                        }
                    ))

                if len(rules) >= self.max_rules:
                    break

            if len(rules) >= self.max_rules:
                break

        return rules

    async def _combine_approaches(
        self,
        features: List[NeuralFeature],
        rules: List[SymbolicRule],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Combine neural and symbolic signals into per-element scores."""
        combined = {
            'neural_weights': {},
            'symbolic_weights': {},
            'combined_scores': {}
        }

        # Normalize feature importances to weights summing to 1
        total_importance = sum(f.importance for f in features)
        if total_importance > 0:
            combined['neural_weights'] = {
                f.name: f.importance / total_importance
                for f in features
            }

        # Normalize rule confidences to weights summing to 1
        total_confidence = sum(r.confidence for r in rules)
        if total_confidence > 0:
            combined['symbolic_weights'] = {
                r.name: r.confidence / total_confidence
                for r in rules
            }

        # Combine scores over the union of elements
        all_elements = set(
            list(combined['neural_weights'].keys()) +
            list(combined['symbolic_weights'].keys())
        )

        for element in all_elements:
            neural_score = combined['neural_weights'].get(element, 0)
            symbolic_score = combined['symbolic_weights'].get(element, 0)

            # Weighted average, favoring the neural signal slightly
            combined['combined_scores'][element] = (
                neural_score * 0.6 +
                symbolic_score * 0.4
            )

        return combined

    async def _generate_analysis(
        self,
        combined: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Generate neurosymbolic analysis: ranking plus score statistics."""
        # Sort elements by combined score (best first)
        ranked_elements = sorted(
            combined['combined_scores'].items(),
            key=lambda x: x[1],
            reverse=True
        )

        # Calculate statistics
        scores = list(combined['combined_scores'].values())
        mean = np.mean(scores) if scores else 0
        std = np.std(scores) if scores else 0

        # Shannon entropy of the score distribution (in bits)
        entropy = -sum(
            s * np.log2(s) if s > 0 else 0
            for s in combined['combined_scores'].values()
        )

        return {
            'top_element': ranked_elements[0][0] if ranked_elements else '',
            'score': ranked_elements[0][1] if ranked_elements else 0,
            'alternatives': [
                {'name': name, 'score': score}
                for name, score in ranked_elements[1:]
            ],
            'statistics': {
                'mean': mean,
                'std': std,
                'entropy': entropy
            }
        }

    def _format_analysis(self, analysis: Dict[str, Any]) -> str:
        """Format the analysis dict into human-readable text."""
        sections = []

        # Top element
        if analysis['top_element']:
            sections.append(
                f"Most significant element: {analysis['top_element']} "
                f"(score: {analysis['score']:.2%})"
            )

        # Alternative elements
        if analysis['alternatives']:
            sections.append("\nAlternative elements:")
            for alt in analysis['alternatives']:
                sections.append(
                    f"- {alt['name']}: {alt['score']:.2%}"
                )

        # Statistics
        stats = analysis['statistics']
        sections.append("\nAnalysis statistics:")
        sections.append(f"- Mean score: {stats['mean']:.2%}")
        sections.append(f"- Standard deviation: {stats['std']:.2%}")
        sections.append(f"- Information entropy: {stats['entropy']:.2f} bits")

        return "\n".join(sections)

    def _calculate_confidence(self, combined: Dict[str, Any]) -> float:
        """Calculate the overall confidence score in [0, 1].

        Base 0.5, boosted by a strong leading score and by a low-entropy
        (clearly differentiated) score distribution.
        """
        if not combined['combined_scores']:
            return 0.0

        # Base confidence
        confidence = 0.5

        # Get scores (non-empty is guaranteed by the guard above)
        scores = list(combined['combined_scores'].values())

        # Strong leading score increases confidence
        max_score = max(scores)
        if max_score > 0.8:
            confidence += 0.3
        elif max_score > 0.6:
            confidence += 0.2
        elif max_score > 0.4:
            confidence += 0.1

        # Low entropy (clear distinction) increases confidence; with a
        # single score max_entropy is 0 and no bonus applies.
        entropy = -sum(s * np.log2(s) if s > 0 else 0 for s in scores)
        max_entropy = -np.log2(1/len(scores))  # maximum possible entropy

        if entropy < 0.3 * max_entropy:
            confidence += 0.2
        elif entropy < 0.6 * max_entropy:
            confidence += 0.1

        return min(confidence, 1.0)
|
reasoning/portfolio_optimization.py
ADDED
@@ -0,0 +1,549 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Advanced portfolio optimization for venture strategies."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
class VentureMetrics:
    """Venture performance metrics."""
    revenue: float  # revenue figure; presumably annual — TODO confirm with callers
    profit: float  # profit figure over the same period as revenue
    growth_rate: float  # growth rate; presumably a fraction — TODO confirm scale
    risk_score: float  # risk measure (averaged across ventures elsewhere)
    resource_usage: Dict[str, float]  # per-resource utilization fraction, keyed by resource name
    synergy_score: float  # captured-synergy measure (averaged across ventures elsewhere)
|
23 |
+
|
24 |
+
@dataclass
class ResourceAllocation:
    """Resource allocation configuration."""
    venture_id: str  # venture this allocation belongs to
    resources: Dict[str, float]  # allocated amount per resource name
    constraints: List[str]  # textual allocation constraints
    dependencies: List[str]  # other ventures/resources this allocation depends on
    priority: float  # relative priority of this allocation
|
32 |
+
|
33 |
+
class PortfolioOptimizer:
|
34 |
+
"""
|
35 |
+
Advanced portfolio optimization that:
|
36 |
+
1. Optimizes venture mix
|
37 |
+
2. Allocates resources
|
38 |
+
3. Manages risks
|
39 |
+
4. Maximizes synergies
|
40 |
+
5. Balances growth
|
41 |
+
"""
|
42 |
+
|
43 |
+
def __init__(self):
    """Initialize an empty portfolio with no ventures or allocations."""
    # venture_id -> performance metrics
    self.ventures: Dict[str, VentureMetrics] = {}
    # venture_id -> resource allocation configuration
    self.allocations: Dict[str, ResourceAllocation] = {}
|
46 |
+
|
47 |
+
async def optimize_portfolio(self,
                           ventures: List[str],
                           context: Dict[str, Any]) -> Dict[str, Any]:
    """Run the full portfolio optimization pipeline.

    Pipeline stages: venture analysis -> resource allocation -> risk
    optimization -> synergy optimization -> performance projection.
    "success" means the projected annual profit is at least $1M.

    Args:
        ventures: Venture identifiers/descriptions to optimize over.
        context: Shared pipeline context passed to every stage.

    Returns:
        Dict with all stage outputs, or {"success": False, "error": ...}
        if any stage raises.
    """
    try:
        venture_analysis = await self._analyze_ventures(ventures, context)
        resource_plan = await self._optimize_allocation(venture_analysis, context)
        risk_plan = await self._optimize_risk(resource_plan, context)
        synergy_plan = await self._optimize_synergies(risk_plan, context)
        outlook = await self._project_performance(synergy_plan, context)

        return {
            "success": outlook["annual_profit"] >= 1_000_000,
            "analysis": venture_analysis,
            "allocation": resource_plan,
            "risk": risk_plan,
            "synergy": synergy_plan,
            "projections": outlook
        }
    except Exception as e:
        logging.error(f"Error in portfolio optimization: {str(e)}")
        return {"success": False, "error": str(e)}
|
78 |
+
|
79 |
+
async def _analyze_ventures(self,
                          ventures: List[str],
                          context: Dict[str, Any]) -> Dict[str, Any]:
    """Analyze venture characteristics via the Groq LLM.

    Fix: ``json.dumps(context)`` raised ``TypeError`` because the context
    carries the live ``groq_api`` client object (read below), which is not
    JSON-serializable — so every call failed. ``default=str`` makes the
    dump best-effort instead of fatal.

    Args:
        ventures: Venture identifiers/descriptions to analyze.
        context: Pipeline context; must contain a "groq_api" client with
            an async ``predict(prompt)`` method.

    Returns:
        Parsed per-venture analysis (structure defined by
        ``_parse_venture_analysis``, implemented elsewhere in this file).
    """
    prompt = f"""
    Analyze ventures:
    Ventures: {json.dumps(ventures)}
    Context: {json.dumps(context, default=str)}
    
    Analyze:
    1. Performance metrics
    2. Resource requirements
    3. Risk factors
    4. Growth potential
    5. Synergy opportunities
    
    Format as:
    [Venture1]
    Metrics: ...
    Resources: ...
    Risks: ...
    Growth: ...
    Synergies: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_venture_analysis(response["answer"])
|
106 |
+
|
107 |
+
async def _optimize_allocation(self,
                             analysis: Dict[str, Any],
                             context: Dict[str, Any]) -> Dict[str, Any]:
    """Optimize resource allocation via the Groq LLM.

    Fix: ``json.dumps(context)`` raised ``TypeError`` because the context
    carries the live ``groq_api`` client object, which is not
    JSON-serializable; ``default=str`` keeps the dump best-effort.

    Args:
        analysis: Output of ``_analyze_ventures``.
        context: Pipeline context; must contain a "groq_api" client.

    Returns:
        Parsed allocation plan (structure defined by
        ``_parse_allocation_optimization``, elsewhere in this file).
    """
    prompt = f"""
    Optimize resource allocation:
    Analysis: {json.dumps(analysis, default=str)}
    Context: {json.dumps(context, default=str)}
    
    Optimize for:
    1. Resource efficiency
    2. Growth potential
    3. Risk balance
    4. Synergy capture
    5. Constraint satisfaction
    
    Format as:
    [Allocation1]
    Venture: ...
    Resources: ...
    Constraints: ...
    Dependencies: ...
    Priority: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_allocation_optimization(response["answer"])
|
134 |
+
|
135 |
+
async def _optimize_risk(self,
                       allocation: Dict[str, Any],
                       context: Dict[str, Any]) -> Dict[str, Any]:
    """Optimize risk management via the Groq LLM.

    Fix: ``json.dumps(context)`` raised ``TypeError`` because the context
    carries the live ``groq_api`` client object, which is not
    JSON-serializable; ``default=str`` keeps the dump best-effort.

    Args:
        allocation: Output of ``_optimize_allocation``.
        context: Pipeline context; must contain a "groq_api" client.

    Returns:
        Parsed risk plan (structure defined by ``_parse_risk_optimization``,
        elsewhere in this file).
    """
    prompt = f"""
    Optimize risk management:
    Allocation: {json.dumps(allocation, default=str)}
    Context: {json.dumps(context, default=str)}
    
    Optimize for:
    1. Risk diversification
    2. Exposure limits
    3. Correlation management
    4. Hedging strategies
    5. Contingency planning
    
    Format as:
    [Risk1]
    Type: ...
    Exposure: ...
    Mitigation: ...
    Contingency: ...
    Impact: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_risk_optimization(response["answer"])
|
162 |
+
|
163 |
+
async def _optimize_synergies(self,
                            risk: Dict[str, Any],
                            context: Dict[str, Any]) -> Dict[str, Any]:
    """Optimize portfolio synergies via the Groq LLM.

    Fix: ``json.dumps(context)`` raised ``TypeError`` because the context
    carries the live ``groq_api`` client object, which is not
    JSON-serializable; ``default=str`` keeps the dump best-effort.

    Args:
        risk: Output of ``_optimize_risk``.
        context: Pipeline context; must contain a "groq_api" client.

    Returns:
        Parsed synergy plan (structure defined by
        ``_parse_synergy_optimization``, elsewhere in this file).
    """
    prompt = f"""
    Optimize synergies:
    Risk: {json.dumps(risk, default=str)}
    Context: {json.dumps(context, default=str)}
    
    Optimize for:
    1. Resource sharing
    2. Knowledge transfer
    3. Market leverage
    4. Technology reuse
    5. Customer cross-sell
    
    Format as:
    [Synergy1]
    Type: ...
    Ventures: ...
    Potential: ...
    Requirements: ...
    Timeline: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_synergy_optimization(response["answer"])
|
190 |
+
|
191 |
+
async def _project_performance(self,
                             synergy: Dict[str, Any],
                             context: Dict[str, Any]) -> Dict[str, Any]:
    """Project portfolio performance via the Groq LLM.

    Fix: ``json.dumps(context)`` raised ``TypeError`` because the context
    carries the live ``groq_api`` client object, which is not
    JSON-serializable; ``default=str`` keeps the dump best-effort.

    Args:
        synergy: Output of ``_optimize_synergies``.
        context: Pipeline context; must contain a "groq_api" client.

    Returns:
        Parsed projections (structure defined by
        ``_parse_performance_projections``, elsewhere in this file);
        callers expect an "annual_profit" key in the result.
    """
    prompt = f"""
    Project performance:
    Synergy: {json.dumps(synergy, default=str)}
    Context: {json.dumps(context, default=str)}
    
    Project:
    1. Revenue growth
    2. Profit margins
    3. Resource utilization
    4. Risk metrics
    5. Synergy capture
    
    Format as:
    [Projections]
    Revenue: ...
    Profit: ...
    Resources: ...
    Risk: ...
    Synergies: ...
    """

    response = await context["groq_api"].predict(prompt)
    return self._parse_performance_projections(response["answer"])
|
218 |
+
|
219 |
+
def _calculate_portfolio_metrics(self) -> Dict[str, float]:
|
220 |
+
"""Calculate comprehensive portfolio metrics."""
|
221 |
+
if not self.ventures:
|
222 |
+
return {
|
223 |
+
"total_revenue": 0.0,
|
224 |
+
"total_profit": 0.0,
|
225 |
+
"avg_growth": 0.0,
|
226 |
+
"avg_risk": 0.0,
|
227 |
+
"resource_efficiency": 0.0,
|
228 |
+
"synergy_capture": 0.0
|
229 |
+
}
|
230 |
+
|
231 |
+
metrics = {
|
232 |
+
"total_revenue": sum(v.revenue for v in self.ventures.values()),
|
233 |
+
"total_profit": sum(v.profit for v in self.ventures.values()),
|
234 |
+
"avg_growth": np.mean([v.growth_rate for v in self.ventures.values()]),
|
235 |
+
"avg_risk": np.mean([v.risk_score for v in self.ventures.values()]),
|
236 |
+
"resource_efficiency": self._calculate_resource_efficiency(),
|
237 |
+
"synergy_capture": np.mean([v.synergy_score for v in self.ventures.values()])
|
238 |
+
}
|
239 |
+
|
240 |
+
return metrics
|
241 |
+
|
242 |
+
def _calculate_resource_efficiency(self) -> float:
|
243 |
+
"""Calculate resource utilization efficiency."""
|
244 |
+
if not self.ventures or not self.allocations:
|
245 |
+
return 0.0
|
246 |
+
|
247 |
+
total_resources = defaultdict(float)
|
248 |
+
used_resources = defaultdict(float)
|
249 |
+
|
250 |
+
# Sum up total and used resources
|
251 |
+
for venture_id, allocation in self.allocations.items():
|
252 |
+
for resource, amount in allocation.resources.items():
|
253 |
+
total_resources[resource] += amount
|
254 |
+
if venture_id in self.ventures:
|
255 |
+
used_resources[resource] += (
|
256 |
+
amount * self.ventures[venture_id].resource_usage.get(resource, 0)
|
257 |
+
)
|
258 |
+
|
259 |
+
# Calculate efficiency for each resource
|
260 |
+
efficiencies = []
|
261 |
+
for resource in total_resources:
|
262 |
+
if total_resources[resource] > 0:
|
263 |
+
efficiency = used_resources[resource] / total_resources[resource]
|
264 |
+
efficiencies.append(efficiency)
|
265 |
+
|
266 |
+
return np.mean(efficiencies) if efficiencies else 0.0
|
267 |
+
|
268 |
+
def get_portfolio_insights(self) -> Dict[str, Any]:
    """Get comprehensive portfolio insights.

    Builds a single report dict combining portfolio-level metrics,
    per-venture headline figures, the current resource allocation,
    a risk profile, and detected optimization opportunities.

    Returns:
        Dict with keys ``portfolio_metrics``, ``venture_metrics``,
        ``resource_allocation``, ``risk_profile`` and
        ``optimization_opportunities``.
    """
    metrics = self._calculate_portfolio_metrics()
    
    return {
        "portfolio_metrics": metrics,
        # Snapshot of each venture's headline figures.
        "venture_metrics": {
            venture_id: {
                "revenue": v.revenue,
                "profit": v.profit,
                "growth_rate": v.growth_rate,
                "risk_score": v.risk_score,
                "synergy_score": v.synergy_score
            }
            for venture_id, v in self.ventures.items()
        },
        # Allocation summary; constraints/dependencies are reported as counts only.
        "resource_allocation": {
            venture_id: {
                "resources": a.resources,
                "priority": a.priority,
                "constraints": len(a.constraints),
                "dependencies": len(a.dependencies)
            }
            for venture_id, a in self.allocations.items()
        },
        # Portfolio risk view: average level plus spread and correlation.
        "risk_profile": {
            "portfolio_risk": metrics["avg_risk"],
            "risk_concentration": self._calculate_risk_concentration(),
            "risk_correlation": self._calculate_risk_correlation()
        },
        "optimization_opportunities": self._identify_optimization_opportunities()
    }
|
300 |
+
|
301 |
+
def _calculate_risk_concentration(self) -> float:
|
302 |
+
"""Calculate risk concentration in portfolio."""
|
303 |
+
if not self.ventures:
|
304 |
+
return 0.0
|
305 |
+
|
306 |
+
risk_weights = [v.risk_score for v in self.ventures.values()]
|
307 |
+
return np.std(risk_weights) if len(risk_weights) > 1 else 0.0
|
308 |
+
|
309 |
+
def _calculate_risk_correlation(self) -> float:
|
310 |
+
"""Calculate risk correlation between ventures."""
|
311 |
+
if len(self.ventures) < 2:
|
312 |
+
return 0.0
|
313 |
+
|
314 |
+
# Create correlation matrix of risk scores and resource usage
|
315 |
+
venture_metrics = [
|
316 |
+
[v.risk_score] + list(v.resource_usage.values())
|
317 |
+
for v in self.ventures.values()
|
318 |
+
]
|
319 |
+
|
320 |
+
correlation_matrix = np.corrcoef(venture_metrics)
|
321 |
+
return np.mean(correlation_matrix[np.triu_indices_from(correlation_matrix, k=1)])
|
322 |
+
|
323 |
+
def _identify_optimization_opportunities(self) -> List[Dict[str, Any]]:
|
324 |
+
"""Identify portfolio optimization opportunities."""
|
325 |
+
opportunities = []
|
326 |
+
|
327 |
+
# Resource optimization opportunities
|
328 |
+
resource_efficiency = self._calculate_resource_efficiency()
|
329 |
+
if resource_efficiency < 0.8:
|
330 |
+
opportunities.append({
|
331 |
+
"type": "resource_optimization",
|
332 |
+
"potential": 1.0 - resource_efficiency,
|
333 |
+
"description": "Improve resource utilization efficiency"
|
334 |
+
})
|
335 |
+
|
336 |
+
# Risk optimization opportunities
|
337 |
+
risk_concentration = self._calculate_risk_concentration()
|
338 |
+
if risk_concentration > 0.2:
|
339 |
+
opportunities.append({
|
340 |
+
"type": "risk_diversification",
|
341 |
+
"potential": risk_concentration,
|
342 |
+
"description": "Reduce risk concentration"
|
343 |
+
})
|
344 |
+
|
345 |
+
# Synergy optimization opportunities
|
346 |
+
avg_synergy = np.mean([v.synergy_score for v in self.ventures.values()]) if self.ventures else 0
|
347 |
+
if avg_synergy < 0.7:
|
348 |
+
opportunities.append({
|
349 |
+
"type": "synergy_capture",
|
350 |
+
"potential": 1.0 - avg_synergy,
|
351 |
+
"description": "Increase synergy capture"
|
352 |
+
})
|
353 |
+
|
354 |
+
return opportunities
|
355 |
+
|
356 |
+
class PortfolioOptimizationStrategy(ReasoningStrategy):
    """
    Advanced portfolio optimization strategy that:
    1. Analyzes venture metrics
    2. Optimizes resource allocation
    3. Balances risk-reward
    4. Maximizes portfolio synergies
    5. Provides actionable recommendations

    Delegates the numeric work to ``PortfolioOptimizer`` and wraps the
    result in the common strategy response shape (``answer`` text,
    ``confidence`` score, structured payloads).
    """
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize portfolio optimization strategy.

        Args:
            config: Optional strategy configuration (stored as-is; not
                interpreted directly in this class).
        """
        super().__init__()
        self.config = config or {}
        # Owned optimizer instance; one fresh portfolio state per strategy.
        self.optimizer = PortfolioOptimizer()
    
    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate portfolio optimization strategy based on query and context.
        
        Args:
            query: The portfolio optimization query
            context: Additional context and parameters (``ventures``,
                ``constraints`` and ``objectives`` keys are honored;
                defaults are substituted otherwise)
        
        Returns:
            Dict containing optimization strategy and confidence scores;
            on failure, a dict with ``error`` and zero ``confidence``.
        """
        try:
            # Extract portfolio parameters
            params = self._extract_parameters(query, context)
            
            # Optimize portfolio.
            # NOTE(review): optimize_portfolio/get_portfolio_metrics are
            # defined on PortfolioOptimizer outside this excerpt — confirm
            # their signatures match this call site.
            optimization_result = self.optimizer.optimize_portfolio(
                ventures=params.get('ventures', []),
                constraints=params.get('constraints', []),
                objectives=params.get('objectives', [])
            )
            
            # Get metrics
            metrics = self.optimizer.get_portfolio_metrics()
            
            # Generate recommendations
            recommendations = self._generate_recommendations(
                optimization_result,
                metrics
            )
            
            return {
                'answer': self._format_strategy(optimization_result, metrics, recommendations),
                'confidence': self._calculate_confidence(optimization_result),
                'optimization': optimization_result,
                'metrics': metrics,
                'recommendations': recommendations
            }
            
        except Exception as e:
            logging.error(f"Portfolio optimization failed: {str(e)}")
            return {
                'error': f"Portfolio optimization failed: {str(e)}",
                'confidence': 0.0
            }
    
    def _extract_parameters(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Extract optimization parameters from query and context.

        ``query`` is currently unused; parameters come from ``context``
        with hard-coded fallbacks for each of the three keys.
        """
        params = {}
        
        # Extract ventures
        if 'ventures' in context:
            params['ventures'] = context['ventures']
        else:
            # Default empty portfolio
            params['ventures'] = []
        
        # Extract constraints
        if 'constraints' in context:
            params['constraints'] = context['constraints']
        else:
            # Default constraints
            params['constraints'] = [
                'budget_limit',
                'risk_tolerance',
                'resource_capacity'
            ]
        
        # Extract objectives
        if 'objectives' in context:
            params['objectives'] = context['objectives']
        else:
            # Default objectives
            params['objectives'] = [
                'maximize_returns',
                'minimize_risk',
                'maximize_synergies'
            ]
        
        return params
    
    def _generate_recommendations(
        self,
        optimization_result: Dict[str, Any],
        metrics: Dict[str, Any]
    ) -> List[str]:
        """Generate actionable recommendations.

        NOTE(review): this method reads 'risk_analysis'/'performance' from
        ``metrics`` while _calculate_confidence looks for 'risk_analysis'
        inside ``optimization_result`` — confirm which producer supplies
        each key.
        """
        recommendations = []
        
        # Portfolio composition recommendations
        if 'allocation' in optimization_result:
            allocation = optimization_result['allocation']
            recommendations.extend([
                f"Allocate {alloc['percentage']:.1f}% to {alloc['venture']}"
                for alloc in allocation
            ])
        
        # Risk management recommendations (thresholds: 0.7 risk, 0.8 correlation)
        if 'risk_analysis' in metrics:
            risk = metrics['risk_analysis']
            if risk.get('total_risk', 0) > 0.7:
                recommendations.append(
                    "Consider reducing exposure to high-risk ventures"
                )
            if risk.get('correlation', 0) > 0.8:
                recommendations.append(
                    "Increase portfolio diversification to reduce correlation"
                )
        
        # Performance optimization recommendations
        if 'performance' in metrics:
            perf = metrics['performance']
            if perf.get('sharpe_ratio', 0) < 1.0:
                recommendations.append(
                    "Optimize risk-adjusted returns through better venture selection"
                )
            if perf.get('efficiency', 0) < 0.8:
                recommendations.append(
                    "Improve resource allocation efficiency across ventures"
                )
        
        return recommendations
    
    def _calculate_confidence(self, optimization_result: Dict[str, Any]) -> float:
        """Calculate confidence score based on optimization quality.

        Starts at 0.5, adds 0.1 per populated result section and up to 0.2
        for convergence quality, capped at 1.0.
        """
        # Base confidence
        confidence = 0.5
        
        # Adjust based on optimization completeness
        if optimization_result.get('allocation'):
            confidence += 0.1
        if optimization_result.get('risk_analysis'):
            confidence += 0.1
        if optimization_result.get('performance_metrics'):
            confidence += 0.1
        
        # Adjust based on solution quality
        if optimization_result.get('convergence_status') == 'optimal':
            confidence += 0.2
        elif optimization_result.get('convergence_status') == 'suboptimal':
            confidence += 0.1
        
        return min(confidence, 1.0)
    
    def _format_strategy(
        self,
        optimization_result: Dict[str, Any],
        metrics: Dict[str, Any],
        recommendations: List[str]
    ) -> str:
        """Format optimization strategy into readable text.

        Produces up to three sections (allocation, key metrics,
        recommendations), skipping any that are empty.
        """
        sections = []
        
        # Portfolio allocation
        if 'allocation' in optimization_result:
            allocation = optimization_result['allocation']
            sections.append("Portfolio Allocation:")
            for alloc in allocation:
                sections.append(
                    f"- {alloc['venture']}: {alloc['percentage']:.1f}%"
                )
        
        # Key metrics (numbers get 2-decimal formatting, others printed raw)
        if metrics:
            sections.append("\nKey Metrics:")
            for key, value in metrics.items():
                if isinstance(value, (int, float)):
                    sections.append(f"- {key.replace('_', ' ').title()}: {value:.2f}")
                else:
                    sections.append(f"- {key.replace('_', ' ').title()}: {value}")
        
        # Recommendations
        if recommendations:
            sections.append("\nKey Recommendations:")
            for rec in recommendations:
                sections.append(f"- {rec}")
        
        return "\n".join(sections)
|
reasoning/quantum.py
ADDED
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Quantum-inspired reasoning implementations."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
@dataclass
class QuantumState:
    """Quantum state with superposition and entanglement."""
    name: str                # label (query term) this basis state represents
    amplitude: complex       # complex probability amplitude
    phase: float             # phase angle in radians
    entangled_states: List[str] = field(default_factory=list)  # names of entangled partners


class QuantumReasoning(ReasoningStrategy):
    """
    Advanced quantum reasoning that:
    1. Creates quantum states
    2. Applies quantum operations
    3. Measures outcomes
    4. Handles superposition
    5. Models entanglement
    """
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize quantum reasoning.

        Args:
            config: Optional settings; quantum-specific keys are
                ``num_qubits``, ``measurement_threshold`` and
                ``decoherence_rate``, plus the standard strategy knobs.
        """
        super().__init__()
        self.config = config or {}
        
        # Standard reasoning parameters (shared across strategies).
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })
        
        # Quantum-specific parameters.
        self.num_qubits = self.config.get('num_qubits', 3)
        self.measurement_threshold = self.config.get('measurement_threshold', 0.1)
        self.decoherence_rate = self.config.get('decoherence_rate', 0.01)
    
    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Apply quantum reasoning to analyze complex decisions.
        
        Args:
            query: The input query to reason about
            context: Additional context and parameters
        
        Returns:
            Dict containing reasoning results and confidence scores
        """
        try:
            # Initialize quantum states
            states = await self._initialize_states(query, context)
            
            # Apply quantum operations
            evolved_states = await self._apply_operations(states, context)
            
            # Measure outcomes
            measurements = await self._measure_states(evolved_states, context)
            
            # Generate analysis
            analysis = await self._generate_analysis(measurements, context)
            
            return {
                'answer': self._format_analysis(analysis),
                'confidence': self._calculate_confidence(measurements),
                'states': states,
                'evolved_states': evolved_states,
                'measurements': measurements,
                'analysis': analysis
            }
            
        except Exception as e:
            logging.error(f"Quantum reasoning failed: {str(e)}")
            return {
                'error': f"Quantum reasoning failed: {str(e)}",
                'confidence': 0.0
            }
    
    async def _initialize_states(
        self,
        query: str,
        context: Dict[str, Any]
    ) -> List[QuantumState]:
        """Initialize a uniform superposition over (at most num_qubits) query terms.

        Each selected term becomes a basis state with equal probability
        mass and evenly spaced phases.

        Returns:
            List of initialized ``QuantumState``; empty for an empty query.
        """
        # BUG FIX: the original kept `terms` as a set and then sliced it
        # (terms[:self.num_qubits]), which raises TypeError because sets
        # are not subscriptable.  Deduplicate order-preservingly into a
        # list and truncate.
        terms = list(dict.fromkeys(query.lower().split()))[:self.num_qubits]
        
        # Guard against an empty query (the original would divide by zero).
        if not terms:
            return []
        
        states = []
        for i, term in enumerate(terms):
            # Equal amplitude for every state; phases spread around the circle.
            amplitude = 1.0 / np.sqrt(len(terms))
            phase = 2 * np.pi * i / len(terms)
            
            states.append(QuantumState(
                name=term,
                amplitude=complex(amplitude * np.cos(phase), amplitude * np.sin(phase)),
                phase=phase
            ))
        
        # Create entangled states if specified
        if context.get('entangle', False):
            self._entangle_states(states)
        
        return states
    
    async def _apply_operations(
        self,
        states: List[QuantumState],
        context: Dict[str, Any]
    ) -> List[QuantumState]:
        """Apply rotation, phase shift and decoherence to every state.

        ``context['rotation']`` and ``context['phase_shift']`` (radians)
        default to 0.0; decoherence shrinks amplitudes by
        ``decoherence_rate``.  Input states are not mutated.
        """
        evolved_states = []
        
        # Get operation parameters
        rotation = context.get('rotation', 0.0)
        phase_shift = context.get('phase_shift', 0.0)
        
        for state in states:
            # Apply rotation
            rotated_amplitude = state.amplitude * np.exp(1j * rotation)
            
            # Apply phase shift (kept within [0, 2*pi))
            shifted_phase = (state.phase + phase_shift) % (2 * np.pi)
            
            # Apply decoherence: uniform amplitude damping
            decohered_amplitude = rotated_amplitude * (1 - self.decoherence_rate)
            
            evolved_states.append(QuantumState(
                name=state.name,
                amplitude=decohered_amplitude,
                phase=shifted_phase,
                entangled_states=state.entangled_states.copy()
            ))
        
        return evolved_states
    
    async def _measure_states(
        self,
        states: List[QuantumState],
        context: Dict[str, Any]
    ) -> Dict[str, float]:
        """Collapse states to normalized probabilities per state name.

        Probabilities below ``measurement_threshold`` are dropped.
        """
        measurements = {}
        
        # Total probability mass used for normalization.
        total_probability = sum(
            abs(state.amplitude) ** 2
            for state in states
        )
        
        if total_probability > 0:
            # Normalize and store measurements above the threshold.
            for state in states:
                probability = (abs(state.amplitude) ** 2) / total_probability
                if probability > self.measurement_threshold:
                    measurements[state.name] = probability
        
        return measurements
    
    def _entangle_states(self, states: List[QuantumState]) -> None:
        """Create pairwise entanglement links between adjacent states (in place)."""
        if len(states) < 2:
            return
        
        # Simple entanglement: connect adjacent states symmetrically.
        for i in range(len(states) - 1):
            states[i].entangled_states.append(states[i + 1].name)
            states[i + 1].entangled_states.append(states[i].name)
    
    async def _generate_analysis(
        self,
        measurements: Dict[str, float],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Rank measurement outcomes and compute summary statistics."""
        # Sort states by measurement probability, most likely first.
        ranked_states = sorted(
            measurements.items(),
            key=lambda x: x[1],
            reverse=True
        )
        
        # Probability-distribution statistics.
        amplitudes = list(measurements.values())
        mean = np.mean(amplitudes) if amplitudes else 0
        std = np.std(amplitudes) if amplitudes else 0
        
        # Shannon entropy of the measured distribution, in bits.
        entropy = -sum(
            p * np.log2(p) if p > 0 else 0
            for p in measurements.values()
        )
        
        return {
            'top_state': ranked_states[0][0] if ranked_states else '',
            'probability': ranked_states[0][1] if ranked_states else 0,
            'alternatives': [
                {'name': name, 'probability': prob}
                for name, prob in ranked_states[1:]
            ],
            'statistics': {
                'mean': mean,
                'std': std,
                'entropy': entropy
            }
        }
    
    def _format_analysis(self, analysis: Dict[str, Any]) -> str:
        """Format analysis into readable text."""
        sections = []
        
        # Top quantum state
        if analysis['top_state']:
            sections.append(
                f"Most probable quantum state: {analysis['top_state']} "
                f"(probability: {analysis['probability']:.2%})"
            )
        
        # Alternative states
        if analysis['alternatives']:
            sections.append("\nAlternative quantum states:")
            for alt in analysis['alternatives']:
                sections.append(
                    f"- {alt['name']}: {alt['probability']:.2%}"
                )
        
        # Quantum statistics
        stats = analysis['statistics']
        sections.append("\nQuantum statistics:")
        sections.append(f"- Mean amplitude: {stats['mean']:.2%}")
        sections.append(f"- Standard deviation: {stats['std']:.2%}")
        sections.append(f"- Quantum entropy: {stats['entropy']:.2f} bits")
        
        return "\n".join(sections)
    
    def _calculate_confidence(self, measurements: Dict[str, float]) -> float:
        """Calculate overall confidence from the measured distribution.

        A dominant outcome and low entropy (clear separation) both raise
        confidence above the 0.5 baseline, capped at 1.0.
        """
        if not measurements:
            return 0.0
        
        # Base confidence
        confidence = 0.5
        
        probs = list(measurements.values())
        
        # Strong leading measurement increases confidence.
        max_prob = max(probs)
        if max_prob > 0.8:
            confidence += 0.3
        elif max_prob > 0.6:
            confidence += 0.2
        elif max_prob > 0.4:
            confidence += 0.1
        
        # Low entropy relative to the maximum possible increases confidence.
        entropy = -sum(p * np.log2(p) if p > 0 else 0 for p in probs)
        max_entropy = -np.log2(1 / len(probs))  # uniform-distribution entropy
        
        if entropy < 0.3 * max_entropy:
            confidence += 0.2
        elif entropy < 0.6 * max_entropy:
            confidence += 0.1
        
        return min(confidence, 1.0)
|
285 |
+
|
286 |
+
|
287 |
+
class QuantumInspiredStrategy(ReasoningStrategy):
    """Implements Quantum-Inspired reasoning via a structured LLM prompt.

    Sends a meta-learning style prompt to ``context['groq_api']`` and
    parses the sectioned reply (PROBLEM ANALYSIS / SOLUTION PATHS /
    META INSIGHTS / CONCLUSION) into structured fields.
    """
    
    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Run the prompt and parse the sectioned response.

        Args:
            query: Problem statement to reason about.
            context: Must contain a ``groq_api`` client with an async
                ``predict``; all other entries are serialized into the prompt.

        Returns:
            On success, parsed sections plus ``reasoning_path``/``conclusion``
            compatibility fields; otherwise ``{"success": False, "error": ...}``.
        """
        try:
            # Create a clean context for serialization (the live API client
            # is not JSON-serializable).
            clean_context = {k: v for k, v in context.items() if k != "groq_api"}
            
            prompt = f"""
            You are a meta-learning reasoning system that adapts its approach based on problem characteristics.
            
            Problem Type:
            Query: {query}
            Context: {json.dumps(clean_context)}
            
            Analyze this problem using meta-learning principles. Structure your response EXACTLY as follows:
            
            PROBLEM ANALYSIS:
            - [First key aspect or complexity factor]
            - [Second key aspect or complexity factor]
            - [Third key aspect or complexity factor]
            
            SOLUTION PATHS:
            - Path 1: [Specific solution approach]
            - Path 2: [Alternative solution approach]
            - Path 3: [Another alternative approach]
            
            META INSIGHTS:
            - Learning 1: [Key insight about the problem space]
            - Learning 2: [Key insight about solution approaches]
            - Learning 3: [Key insight about trade-offs]
            
            CONCLUSION:
            [Final synthesized solution incorporating meta-learnings]
            """
            
            response = await context["groq_api"].predict(prompt)
            
            if not response["success"]:
                return response
            
            # Parse the response into its four sections, line by line.
            lines = response["answer"].split("\n")
            problem_analysis = []
            solution_paths = []
            meta_insights = []
            conclusion = ""
            
            section = None
            for line in lines:
                line = line.strip()
                if not line:
                    continue
                
                if "PROBLEM ANALYSIS:" in line:
                    section = "analysis"
                elif "SOLUTION PATHS:" in line:
                    section = "paths"
                elif "META INSIGHTS:" in line:
                    section = "insights"
                elif "CONCLUSION:" in line:
                    section = "conclusion"
                elif line.startswith("-"):
                    content = line.lstrip("- ").strip()
                    if section == "analysis":
                        problem_analysis.append(content)
                    elif section == "paths":
                        solution_paths.append(content)
                    elif section == "insights":
                        meta_insights.append(content)
                    elif section == "conclusion":
                        # BUG FIX: bulleted lines under CONCLUSION were
                        # silently dropped by the original parser.
                        conclusion += line + " "
                elif section == "conclusion":
                    conclusion += line + " "
            
            # BUG FIX: the original return dict listed "conclusion" twice;
            # duplicate dict keys silently overwrite each other.
            return {
                "success": True,
                "problem_analysis": problem_analysis,
                "solution_paths": solution_paths,
                "meta_insights": meta_insights,
                # Standard fields for compatibility with other strategies.
                "reasoning_path": problem_analysis + solution_paths + meta_insights,
                "conclusion": conclusion.strip()
            }
            
        except Exception as e:
            return {"success": False, "error": str(e)}
|
reasoning/recursive.py
ADDED
@@ -0,0 +1,576 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Recursive reasoning implementation with advanced decomposition and synthesis."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Tuple, Callable
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import asyncio
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
class SubproblemType(Enum):
    """Types of subproblems in recursive reasoning.

    The string values are used when serializing problem trees; member
    semantics follow their names (decomposition/solution strategy).
    """
    ATOMIC = "atomic"            # indivisible; solved directly
    COMPOSITE = "composite"      # decomposed into child subproblems
    PARALLEL = "parallel"        # children independent of each other
    SEQUENTIAL = "sequential"    # children ordered
    CONDITIONAL = "conditional"  # branch selection depends on earlier results
    ITERATIVE = "iterative"      # solved by repeated passes
|
22 |
+
|
23 |
+
class SolutionStatus(Enum):
    """Status of subproblem solutions (lifecycle of a ``Subproblem``)."""
    PENDING = "pending"          # created, not yet attempted
    IN_PROGRESS = "in_progress"  # currently being solved
    SOLVED = "solved"            # solution available
    FAILED = "failed"            # solving attempt gave up
    BLOCKED = "blocked"          # waiting on unresolved dependencies
    OPTIMIZING = "optimizing"    # solved; refinement rounds in progress
|
31 |
+
|
32 |
+
@dataclass
class Subproblem:
    """Represents a subproblem in recursive reasoning.

    Nodes form a tree via ``parent_id``/``children`` (IDs index into
    ``RecursiveReasoning.subproblems``).
    """
    id: str                              # unique key in the subproblem registry
    type: SubproblemType                 # decomposition/solution strategy
    query: str                           # natural-language statement of this piece
    context: Dict[str, Any]              # context passed down to the solver
    parent_id: Optional[str]             # None for the root problem
    children: List[str]                  # IDs of decomposed child subproblems
    status: SolutionStatus               # current lifecycle state
    solution: Optional[Dict[str, Any]]   # result payload once solved
    confidence: float                    # solver confidence in the solution
    dependencies: List[str]              # IDs that must be solved first
    metadata: Dict[str, Any] = field(default_factory=dict)  # free-form extras
|
46 |
+
|
47 |
+
@dataclass
class RecursiveStep:
    """Represents a step in recursive reasoning (audit-trail record)."""
    id: str                              # unique step identifier
    subproblem_id: str                   # subproblem this step acted on
    action: str                          # what was done (e.g. decompose/solve)
    timestamp: datetime                  # when the step occurred
    result: Optional[Dict[str, Any]]     # outcome payload, if any
    metrics: Dict[str, float]            # per-step performance numbers
    metadata: Dict[str, Any] = field(default_factory=dict)  # free-form extras
|
57 |
+
|
58 |
+
class RecursiveReasoning(ReasoningStrategy):
|
59 |
+
"""
|
60 |
+
Advanced Recursive Reasoning implementation with:
|
61 |
+
- Dynamic problem decomposition
|
62 |
+
- Parallel subproblem solving
|
63 |
+
- Solution synthesis
|
64 |
+
- Cycle detection
|
65 |
+
- Optimization strategies
|
66 |
+
"""
|
67 |
+
|
68 |
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
    """Initialize recursive reasoning.

    Args:
        config: Optional settings; recursive-specific keys are
            ``max_depth`` (recursion cutoff) and ``optimization_rounds``,
            plus the standard strategy knobs shared across strategies.
    """
    super().__init__()
    self.config = config or {}
    
    # Standard reasoning parameters (shared across strategies)
    self.min_confidence = self.config.get('min_confidence', 0.7)
    self.parallel_threshold = self.config.get('parallel_threshold', 3)
    self.learning_rate = self.config.get('learning_rate', 0.1)
    self.strategy_weights = self.config.get('strategy_weights', {
        "LOCAL_LLM": 0.8,
        "CHAIN_OF_THOUGHT": 0.6,
        "TREE_OF_THOUGHTS": 0.5,
        "META_LEARNING": 0.4
    })
    
    # Recursive reasoning specific parameters
    self.max_depth = self.config.get('max_depth', 5)
    self.optimization_rounds = self.config.get('optimization_rounds', 2)
    
    # Problem tracking: registry of nodes, step audit trail, memoized
    # solutions, and a visited-set used for cycle detection.
    self.subproblems: Dict[str, Subproblem] = {}
    self.steps: List[RecursiveStep] = []
    self.solution_cache: Dict[str, Dict[str, Any]] = {}
    self.cycle_detection: Set[str] = set()
    
    # Performance metrics, aggregated per depth and per subproblem type.
    self.depth_distribution: Dict[int, int] = defaultdict(int)
    self.type_distribution: Dict[SubproblemType, int] = defaultdict(int)
    self.success_rate: Dict[SubproblemType, float] = defaultdict(float)
|
98 |
+
|
99 |
+
    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Main reasoning method implementing recursive reasoning.

        Args:
            query: Problem statement to solve.
            context: Must contain a "groq_api" client (used by the helper
                coroutines for LLM calls).

        Returns:
            On success: dict with 'success': True plus the optimized answer,
            the decomposition tree, the solution trace and metrics.
            On any exception: {'success': False, 'error': <message>}.
        """
        try:
            # Initialize root problem
            root = await self._initialize_problem(query, context)
            self.subproblems[root.id] = root

            # Recursively solve
            solution = await self._solve_recursive(root.id, depth=0)

            # Optimize solution
            optimized = await self._optimize_solution(solution, root, context)

            # Update metrics
            self._update_metrics(root.id)

            return {
                "success": True,
                "answer": optimized["answer"],
                "confidence": optimized["confidence"],
                "decomposition": self._get_problem_tree(root.id),
                "solution_trace": self._get_solution_trace(root.id),
                "performance_metrics": self._get_performance_metrics(),
                "meta_insights": optimized["meta_insights"]
            }
        except Exception as e:
            logging.error(f"Error in recursive reasoning: {str(e)}")
            return {"success": False, "error": str(e)}
|
127 |
+
|
128 |
+
    async def _initialize_problem(self, query: str, context: Dict[str, Any]) -> Subproblem:
        """Initialize the root problem by asking the LLM for a classification.

        NOTE(review): json.dumps(context) assumes the context is
        JSON-serializable; a non-serializable entry (such as the "groq_api"
        client object used below) would raise TypeError here — confirm
        callers pass a serializable context.
        """
        prompt = f"""
        Initialize recursive reasoning problem:
        Query: {query}
        Context: {json.dumps(context)}

        Analyze for:
        1. Problem type classification
        2. Initial decomposition strategy
        3. Key dependencies
        4. Solution approach

        Format as:
        [Problem]
        Type: ...
        Strategy: ...
        Dependencies: ...
        Approach: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_problem_init(response["answer"], query, context)
|
151 |
+
|
152 |
+
    async def _decompose_problem(self, problem: Subproblem, context: Dict[str, Any]) -> List[Subproblem]:
        """Decompose a problem into subproblems via a structured LLM prompt.

        Returns the parsed child Subproblem objects (not yet registered in
        self.subproblems — the caller does that).
        """
        prompt = f"""
        Decompose problem into subproblems:
        Problem: {json.dumps(self._problem_to_dict(problem))}
        Context: {json.dumps(context)}

        For each subproblem specify:
        1. [Type]: {" | ".join([t.value for t in SubproblemType])}
        2. [Query]: Specific question
        3. [Dependencies]: Required solutions
        4. [Approach]: Solution strategy

        Format as:
        [S1]
        Type: ...
        Query: ...
        Dependencies: ...
        Approach: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_subproblems(response["answer"], problem.id, context)
|
175 |
+
|
176 |
+
    async def _solve_recursive(self, problem_id: str, depth: int) -> Dict[str, Any]:
        """Recursively solve a problem and its subproblems.

        Guards against runaway recursion (self.max_depth) and dependency
        cycles, memoizes solved subproblems, and solves PARALLEL subproblems
        concurrently when there are at least self.parallel_threshold of them.

        Args:
            problem_id: Key into self.subproblems.
            depth: Current recursion depth (root is 0).

        Returns:
            Solution dict; on guard failure, {'success': False, 'error': ...}.
        """
        if depth > self.max_depth:
            return {"success": False, "error": "Maximum recursion depth exceeded"}

        if problem_id in self.cycle_detection:
            return {"success": False, "error": "Cycle detected in recursive solving"}

        problem = self.subproblems[problem_id]
        self.cycle_detection.add(problem_id)
        self.depth_distribution[depth] += 1

        try:
            # Check cache
            # NOTE(review): assumes problem.context is JSON-serializable; a
            # non-serializable entry (e.g. a client object) would raise here
            # and propagate to reason()'s except handler — confirm intended.
            cache_key = f"{problem.query}:{json.dumps(problem.context)}"
            if cache_key in self.solution_cache:
                return self.solution_cache[cache_key]

            # Check if atomic
            if problem.type == SubproblemType.ATOMIC:
                solution = await self._solve_atomic(problem)
            else:
                # Decompose
                subproblems = await self._decompose_problem(problem, problem.context)
                for sub in subproblems:
                    self.subproblems[sub.id] = sub
                    problem.children.append(sub.id)

                # Solve subproblems
                if problem.type == SubproblemType.PARALLEL and len(subproblems) >= self.parallel_threshold:
                    # Solve in parallel
                    tasks = [self._solve_recursive(sub.id, depth + 1) for sub in subproblems]
                    subsolutions = await asyncio.gather(*tasks)
                else:
                    # Solve sequentially
                    subsolutions = []
                    for sub in subproblems:
                        subsolution = await self._solve_recursive(sub.id, depth + 1)
                        subsolutions.append(subsolution)

                # Synthesize solutions
                solution = await self._synthesize_solutions(subsolutions, problem, problem.context)

            # Cache solution
            self.solution_cache[cache_key] = solution
            problem.solution = solution
            problem.status = SolutionStatus.SOLVED if solution["success"] else SolutionStatus.FAILED

            return solution

        finally:
            # Always pop from the in-flight set, even on error, so the id
            # is not permanently flagged as a cycle.
            self.cycle_detection.remove(problem_id)
|
228 |
+
|
229 |
+
    async def _solve_atomic(self, problem: Subproblem) -> Dict[str, Any]:
        """Solve an atomic problem with a single LLM call and record the step."""
        prompt = f"""
        Solve atomic problem:
        Problem: {json.dumps(self._problem_to_dict(problem))}

        Provide:
        1. Direct solution
        2. Confidence level
        3. Supporting evidence
        4. Alternative approaches

        Format as:
        [Solution]
        Answer: ...
        Confidence: ...
        Evidence: ...
        Alternatives: ...
        """

        response = await problem.context["groq_api"].predict(prompt)
        solution = self._parse_atomic_solution(response["answer"])

        # Append to the trace so _get_solution_trace can reconstruct the path.
        self._record_step(RecursiveStep(
            id=f"step_{len(self.steps)}",
            subproblem_id=problem.id,
            action="atomic_solve",
            timestamp=datetime.now(),
            result=solution,
            metrics={"confidence": solution.get("confidence", 0.0)},
            metadata={}
        ))

        return solution
|
263 |
+
|
264 |
+
    async def _synthesize_solutions(self, subsolutions: List[Dict[str, Any]], problem: Subproblem, context: Dict[str, Any]) -> Dict[str, Any]:
        """Synthesize subproblem solutions into one combined solution via the LLM."""
        prompt = f"""
        Synthesize solutions:
        Problem: {json.dumps(self._problem_to_dict(problem))}
        Solutions: {json.dumps(subsolutions)}
        Context: {json.dumps(context)}

        Provide:
        1. Integrated solution
        2. Confidence assessment
        3. Integration method
        4. Quality metrics

        Format as:
        [Synthesis]
        Solution: ...
        Confidence: ...
        Method: ...
        Metrics: ...
        """

        response = await context["groq_api"].predict(prompt)
        synthesis = self._parse_synthesis(response["answer"])

        # Record the synthesis step with how many subsolutions it merged.
        self._record_step(RecursiveStep(
            id=f"step_{len(self.steps)}",
            subproblem_id=problem.id,
            action="synthesize",
            timestamp=datetime.now(),
            result=synthesis,
            metrics={"confidence": synthesis.get("confidence", 0.0)},
            metadata={"num_subsolutions": len(subsolutions)}
        ))

        return synthesis
|
300 |
+
|
301 |
+
    async def _optimize_solution(self, solution: Dict[str, Any], problem: Subproblem, context: Dict[str, Any]) -> Dict[str, Any]:
        """Optimize the final solution with one extra LLM refinement pass.

        NOTE(review): self.optimization_rounds is not consulted here — only
        a single pass is performed; confirm whether multiple rounds were
        intended.
        """
        prompt = f"""
        Optimize recursive solution:
        Original: {json.dumps(solution)}
        Problem: {json.dumps(self._problem_to_dict(problem))}
        Context: {json.dumps(context)}

        Optimize for:
        1. Completeness
        2. Consistency
        3. Efficiency
        4. Clarity

        Format as:
        [Optimization]
        Answer: ...
        Improvements: ...
        Metrics: ...
        Insights: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_optimization(response["answer"])
|
325 |
+
|
326 |
+
def _update_metrics(self, root_id: str):
|
327 |
+
"""Update performance metrics."""
|
328 |
+
def update_recursive(problem_id: str):
|
329 |
+
problem = self.subproblems[problem_id]
|
330 |
+
self.type_distribution[problem.type] += 1
|
331 |
+
|
332 |
+
if problem.status == SolutionStatus.SOLVED:
|
333 |
+
self.success_rate[problem.type] = (
|
334 |
+
self.success_rate[problem.type] * (self.type_distribution[problem.type] - 1) +
|
335 |
+
problem.confidence
|
336 |
+
) / self.type_distribution[problem.type]
|
337 |
+
|
338 |
+
for child_id in problem.children:
|
339 |
+
update_recursive(child_id)
|
340 |
+
|
341 |
+
update_recursive(root_id)
|
342 |
+
|
343 |
+
    def _get_problem_tree(self, root_id: str) -> Dict[str, Any]:
        """Get the problem decomposition tree rooted at *root_id* as nested dicts.

        Enum fields are flattened to their string values so the tree is
        JSON-serializable.
        """
        def build_tree(problem_id: str) -> Dict[str, Any]:
            problem = self.subproblems[problem_id]
            return {
                "id": problem.id,
                "type": problem.type.value,
                "query": problem.query,
                "status": problem.status.value,
                "confidence": problem.confidence,
                "children": [build_tree(child_id) for child_id in problem.children]
            }

        return build_tree(root_id)
|
357 |
+
|
358 |
+
    def _get_solution_trace(self, root_id: str) -> List[Dict[str, Any]]:
        """Get the solution trace for a problem.

        NOTE(review): only steps belonging to the root itself or to its
        *direct* children are included; steps of deeper descendants are
        omitted — confirm that this truncation is intended.
        """
        return [self._step_to_dict(step) for step in self.steps
                if step.subproblem_id == root_id or
                any(step.subproblem_id == sub_id for sub_id in self.subproblems[root_id].children)]
|
363 |
+
|
364 |
+
def _get_performance_metrics(self) -> Dict[str, Any]:
|
365 |
+
"""Get current performance metrics."""
|
366 |
+
return {
|
367 |
+
"depth_distribution": dict(self.depth_distribution),
|
368 |
+
"type_distribution": {t.value: c for t, c in self.type_distribution.items()},
|
369 |
+
"success_rate": {t.value: r for t, r in self.success_rate.items()},
|
370 |
+
"cache_hits": len(self.solution_cache),
|
371 |
+
"total_steps": len(self.steps)
|
372 |
+
}
|
373 |
+
|
374 |
+
def _record_step(self, step: RecursiveStep):
|
375 |
+
"""Record a reasoning step."""
|
376 |
+
self.steps.append(step)
|
377 |
+
|
378 |
+
    def _parse_problem_init(self, response: str, query: str, context: Dict[str, Any]) -> Subproblem:
        """Parse initial problem configuration from the LLM response.

        Builds the root Subproblem. An unrecognized 'Type:' value silently
        falls back to the COMPOSITE default.
        """
        problem_type = SubproblemType.COMPOSITE  # default
        dependencies = []
        metadata = {}

        for line in response.split('\n'):
            line = line.strip()
            if line.startswith('Type:'):
                try:
                    problem_type = SubproblemType(line[5:].strip().lower())
                except ValueError:
                    # Unknown type string: keep the COMPOSITE default.
                    pass
            elif line.startswith('Dependencies:'):
                dependencies = [d.strip() for d in line[13:].split(',')]
            elif line.startswith('Strategy:') or line.startswith('Approach:'):
                # Both labels map onto the same metadata slot.
                metadata["strategy"] = line.split(':', 1)[1].strip()

        return Subproblem(
            id="root",
            type=problem_type,
            query=query,
            context=context,
            parent_id=None,
            children=[],
            status=SolutionStatus.PENDING,
            solution=None,
            confidence=0.0,
            dependencies=dependencies,
            metadata=metadata
        )
|
409 |
+
|
410 |
+
    def _parse_subproblems(self, response: str, parent_id: str, context: Dict[str, Any]) -> List[Subproblem]:
        """Parse subproblems from the LLM response.

        A new subproblem is only opened once its 'Type:' line parses as a
        valid SubproblemType; fields appearing before 'Type:' within a
        section, or in a section with an invalid type, are dropped.
        """
        subproblems = []
        current = None

        for line in response.split('\n'):
            line = line.strip()
            if not line:
                continue

            if line.startswith('[S'):
                # New "[Sn]" section header: flush the previous subproblem.
                if current:
                    subproblems.append(current)
                current = None
            elif line.startswith('Type:'):
                try:
                    problem_type = SubproblemType(line[5:].strip().lower())
                    current = Subproblem(
                        id=f"{parent_id}_{len(subproblems)}",
                        type=problem_type,
                        query="",
                        context=context,
                        parent_id=parent_id,
                        children=[],
                        status=SolutionStatus.PENDING,
                        solution=None,
                        confidence=0.0,
                        dependencies=[],
                        metadata={}
                    )
                except ValueError:
                    # Invalid type: discard this section's remaining fields.
                    current = None
            elif current:
                if line.startswith('Query:'):
                    current.query = line[6:].strip()
                elif line.startswith('Dependencies:'):
                    current.dependencies = [d.strip() for d in line[13:].split(',')]
                elif line.startswith('Approach:'):
                    current.metadata["approach"] = line[9:].strip()

        # Flush the final in-progress subproblem, if any.
        if current:
            subproblems.append(current)

        return subproblems
|
454 |
+
|
455 |
+
def _parse_atomic_solution(self, response: str) -> Dict[str, Any]:
|
456 |
+
"""Parse atomic solution from response."""
|
457 |
+
solution = {
|
458 |
+
"success": True,
|
459 |
+
"answer": "",
|
460 |
+
"confidence": 0.0,
|
461 |
+
"evidence": [],
|
462 |
+
"alternatives": []
|
463 |
+
}
|
464 |
+
|
465 |
+
for line in response.split('\n'):
|
466 |
+
line = line.strip()
|
467 |
+
if line.startswith('Answer:'):
|
468 |
+
solution["answer"] = line[7:].strip()
|
469 |
+
elif line.startswith('Confidence:'):
|
470 |
+
try:
|
471 |
+
solution["confidence"] = float(line[11:].strip())
|
472 |
+
except:
|
473 |
+
pass
|
474 |
+
elif line.startswith('Evidence:'):
|
475 |
+
solution["evidence"] = [e.strip() for e in line[9:].split(',')]
|
476 |
+
elif line.startswith('Alternatives:'):
|
477 |
+
solution["alternatives"] = [a.strip() for a in line[13:].split(',')]
|
478 |
+
|
479 |
+
return solution
|
480 |
+
|
481 |
+
def _parse_synthesis(self, response: str) -> Dict[str, Any]:
|
482 |
+
"""Parse synthesis result from response."""
|
483 |
+
synthesis = {
|
484 |
+
"success": True,
|
485 |
+
"solution": "",
|
486 |
+
"confidence": 0.0,
|
487 |
+
"method": "",
|
488 |
+
"metrics": {}
|
489 |
+
}
|
490 |
+
|
491 |
+
for line in response.split('\n'):
|
492 |
+
line = line.strip()
|
493 |
+
if line.startswith('Solution:'):
|
494 |
+
synthesis["solution"] = line[9:].strip()
|
495 |
+
elif line.startswith('Confidence:'):
|
496 |
+
try:
|
497 |
+
synthesis["confidence"] = float(line[11:].strip())
|
498 |
+
except:
|
499 |
+
pass
|
500 |
+
elif line.startswith('Method:'):
|
501 |
+
synthesis["method"] = line[7:].strip()
|
502 |
+
elif line.startswith('Metrics:'):
|
503 |
+
try:
|
504 |
+
synthesis["metrics"] = json.loads(line[8:].strip())
|
505 |
+
except:
|
506 |
+
pass
|
507 |
+
|
508 |
+
return synthesis
|
509 |
+
|
510 |
+
def _parse_optimization(self, response: str) -> Dict[str, Any]:
|
511 |
+
"""Parse optimization result from response."""
|
512 |
+
optimization = {
|
513 |
+
"answer": "",
|
514 |
+
"confidence": 0.0,
|
515 |
+
"improvements": [],
|
516 |
+
"metrics": {},
|
517 |
+
"meta_insights": []
|
518 |
+
}
|
519 |
+
|
520 |
+
for line in response.split('\n'):
|
521 |
+
line = line.strip()
|
522 |
+
if line.startswith('Answer:'):
|
523 |
+
optimization["answer"] = line[7:].strip()
|
524 |
+
elif line.startswith('Improvements:'):
|
525 |
+
optimization["improvements"] = [i.strip() for i in line[13:].split(',')]
|
526 |
+
elif line.startswith('Metrics:'):
|
527 |
+
try:
|
528 |
+
optimization["metrics"] = json.loads(line[8:].strip())
|
529 |
+
except:
|
530 |
+
pass
|
531 |
+
elif line.startswith('Insights:'):
|
532 |
+
optimization["meta_insights"] = [i.strip() for i in line[9:].split(',')]
|
533 |
+
|
534 |
+
return optimization
|
535 |
+
|
536 |
+
    def _problem_to_dict(self, problem: Subproblem) -> Dict[str, Any]:
        """Convert a problem to a dictionary for serialization.

        Enum fields are flattened to their string values. The problem's
        `context` and `solution` fields are omitted — presumably because
        they may not be JSON-serializable; TODO confirm this is intended.
        """
        return {
            "id": problem.id,
            "type": problem.type.value,
            "query": problem.query,
            "parent_id": problem.parent_id,
            "children": problem.children,
            "status": problem.status.value,
            "confidence": problem.confidence,
            "dependencies": problem.dependencies,
            "metadata": problem.metadata
        }
|
549 |
+
|
550 |
+
def _step_to_dict(self, step: RecursiveStep) -> Dict[str, Any]:
|
551 |
+
"""Convert step to dictionary for serialization."""
|
552 |
+
return {
|
553 |
+
"id": step.id,
|
554 |
+
"subproblem_id": step.subproblem_id,
|
555 |
+
"action": step.action,
|
556 |
+
"timestamp": step.timestamp.isoformat(),
|
557 |
+
"result": step.result,
|
558 |
+
"metrics": step.metrics,
|
559 |
+
"metadata": step.metadata
|
560 |
+
}
|
561 |
+
|
562 |
+
    def clear_cache(self):
        """Clear the solution cache; subsequent solves will recompute.

        Mutates the existing dict in place so external references stay valid.
        """
        self.solution_cache.clear()
|
565 |
+
|
566 |
+
def get_statistics(self) -> Dict[str, Any]:
|
567 |
+
"""Get detailed statistics about the reasoning process."""
|
568 |
+
return {
|
569 |
+
"total_problems": len(self.subproblems),
|
570 |
+
"total_steps": len(self.steps),
|
571 |
+
"cache_size": len(self.solution_cache),
|
572 |
+
"type_distribution": dict(self.type_distribution),
|
573 |
+
"depth_distribution": dict(self.depth_distribution),
|
574 |
+
"success_rates": dict(self.success_rate),
|
575 |
+
"average_confidence": sum(p.confidence for p in self.subproblems.values()) / len(self.subproblems) if self.subproblems else 0.0
|
576 |
+
}
|
reasoning/specialized.py
ADDED
@@ -0,0 +1,476 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Specialized reasoning strategies for specific domains and tasks."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Callable
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import asyncio
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
class SpecializedReasoning(ReasoningStrategy):
    """
    A composite reasoning strategy that combines multiple specialized strategies
    for different domains and tasks.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize specialized reasoning with component strategies.

        Args:
            config: Optional settings; the tuning knobs below are forwarded
                to every component strategy via a shared config dict.
        """
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Initialize component strategies with shared config
        strategy_config = {
            'min_confidence': self.min_confidence,
            'parallel_threshold': self.parallel_threshold,
            'learning_rate': self.learning_rate,
            'strategy_weights': self.strategy_weights
        }

        # Name -> strategy instance; the keys double as selection labels in
        # _select_strategies, so they must stay in sync with its keyword map.
        self.strategies = {
            'code_rewrite': CodeRewriteStrategy(strategy_config),
            'security_audit': SecurityAuditStrategy(strategy_config),
            'performance': PerformanceOptimizationStrategy(strategy_config),
            'testing': TestGenerationStrategy(strategy_config),
            'documentation': DocumentationStrategy(strategy_config),
            'api_design': APIDesignStrategy(strategy_config),
            'dependencies': DependencyManagementStrategy(strategy_config),
            'code_review': CodeReviewStrategy(strategy_config)
        }
|
54 |
+
|
55 |
+
    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Apply specialized reasoning by selecting and combining appropriate
        strategies based on the query and context.

        Args:
            query: The input query to reason about
            context: Additional context and parameters

        Returns:
            Dict containing reasoning results and confidence scores
        """
        try:
            # Determine which strategies to use based on context
            selected_strategies = await self._select_strategies(query, context)

            # Get results from each selected strategy
            # (strategies run sequentially, in selection order).
            results = {}
            for strategy_name in selected_strategies:
                strategy = self.strategies[strategy_name]
                results[strategy_name] = await strategy.reason(query, context)

            # Combine results
            combined_result = await self._combine_results(results, context)

            return {
                'answer': combined_result.get('answer', ''),
                'confidence': combined_result.get('confidence', 0.0),
                'reasoning_path': {
                    'selected_strategies': selected_strategies,
                    'individual_results': results,
                    'combination_method': combined_result.get('method', '')
                }
            }

        except Exception as e:
            logging.error(f"Specialized reasoning failed: {str(e)}")
            return {
                'error': f"Specialized reasoning failed: {str(e)}",
                'confidence': 0.0
            }
|
96 |
+
|
97 |
+
async def _select_strategies(self, query: str, context: Dict[str, Any]) -> List[str]:
|
98 |
+
"""Select appropriate strategies based on query and context."""
|
99 |
+
selected = []
|
100 |
+
|
101 |
+
# Simple keyword-based selection for now
|
102 |
+
keywords = {
|
103 |
+
'code_rewrite': ['rewrite', 'refactor', 'improve'],
|
104 |
+
'security_audit': ['security', 'vulnerability', 'audit'],
|
105 |
+
'performance': ['performance', 'optimize', 'speed'],
|
106 |
+
'testing': ['test', 'coverage', 'verify'],
|
107 |
+
'documentation': ['document', 'explain', 'describe'],
|
108 |
+
'api_design': ['api', 'interface', 'endpoint'],
|
109 |
+
'dependencies': ['dependency', 'package', 'version'],
|
110 |
+
'code_review': ['review', 'quality', 'check']
|
111 |
+
}
|
112 |
+
|
113 |
+
query_lower = query.lower()
|
114 |
+
for strategy, terms in keywords.items():
|
115 |
+
if any(term in query_lower for term in terms):
|
116 |
+
selected.append(strategy)
|
117 |
+
|
118 |
+
# If no specific strategies selected, use code review as default
|
119 |
+
if not selected:
|
120 |
+
selected = ['code_review']
|
121 |
+
|
122 |
+
return selected
|
123 |
+
|
124 |
+
async def _combine_results(
|
125 |
+
self,
|
126 |
+
results: Dict[str, Dict[str, Any]],
|
127 |
+
context: Dict[str, Any]
|
128 |
+
) -> Dict[str, Any]:
|
129 |
+
"""Combine results from multiple strategies."""
|
130 |
+
if not results:
|
131 |
+
return {'answer': '', 'confidence': 0.0, 'method': 'none'}
|
132 |
+
|
133 |
+
# For now, use the highest confidence result
|
134 |
+
best_result = max(
|
135 |
+
results.items(),
|
136 |
+
key=lambda x: x[1].get('confidence', 0)
|
137 |
+
)
|
138 |
+
|
139 |
+
return {
|
140 |
+
'answer': best_result[1].get('answer', ''),
|
141 |
+
'confidence': best_result[1].get('confidence', 0.0),
|
142 |
+
'method': 'highest_confidence'
|
143 |
+
}
|
144 |
+
|
145 |
+
class CodeRewriteStrategy(ReasoningStrategy):
    """
    Advanced code rewriting strategy that:
    1. Analyzes code structure and patterns
    2. Identifies refactoring opportunities
    3. Maintains code semantics
    4. Optimizes code quality
    5. Ensures backward compatibility

    NOTE(review): the helper coroutines used in reason() (_analyze_code,
    _generate_rewrite_plan, _execute_rewrites, _validate_changes) are not
    defined in this class and are not visible in this file; unless a base
    class or mixin supplies them, reason() will raise AttributeError and
    return the error dict — confirm the helpers exist.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Store optional configuration; defaults to an empty dict."""
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Rewrite code while preserving functionality.

        Returns a result dict with 'success', the rewrites and validation
        details, or {'success': False, 'error': ...} on any exception.
        """
        try:
            # Analyze code
            analysis = await self._analyze_code(query, context)

            # Generate rewrite plan
            plan = await self._generate_rewrite_plan(analysis, context)

            # Execute rewrites
            rewrites = await self._execute_rewrites(plan, context)

            # Validate changes
            validation = await self._validate_changes(rewrites, context)

            return {
                "success": validation["success"],
                "rewrites": rewrites,
                "validation": validation,
                "metrics": {
                    "quality_improvement": validation.get("quality_score", 0.0),
                    "semantic_preservation": validation.get("semantic_score", 0.0)
                }
            }
        except Exception as e:
            logging.error(f"Error in code rewrite: {str(e)}")
            return {"success": False, "error": str(e)}
|
186 |
+
|
187 |
+
class SecurityAuditStrategy(ReasoningStrategy):
    """
    Advanced security audit strategy that:
    1. Identifies security vulnerabilities
    2. Analyzes attack vectors
    3. Recommends security fixes
    4. Validates security measures
    5. Monitors security state

    NOTE(review): the helper coroutines used in reason() (_scan_vulnerabilities,
    _analyze_risks, _generate_fixes, _validate_security) are not defined in
    this class and are not visible in this file — confirm a base class or
    mixin supplies them, otherwise reason() always returns the error dict.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Store optional configuration; defaults to an empty dict."""
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Perform security audit and generate recommendations.

        Note: unlike sibling strategies, 'success' is hard-coded True here
        rather than taken from the validation result.
        """
        try:
            # Scan for vulnerabilities
            vulnerabilities = await self._scan_vulnerabilities(query, context)

            # Analyze risks
            risks = await self._analyze_risks(vulnerabilities, context)

            # Generate fixes
            fixes = await self._generate_fixes(risks, context)

            # Validate security
            validation = await self._validate_security(fixes, context)

            return {
                "success": True,
                "vulnerabilities": vulnerabilities,
                "risks": risks,
                "fixes": fixes,
                "validation": validation
            }
        except Exception as e:
            logging.error(f"Error in security audit: {str(e)}")
            return {"success": False, "error": str(e)}
|
226 |
+
|
227 |
+
class PerformanceOptimizationStrategy(ReasoningStrategy):
    """
    Advanced performance optimization strategy that:
    1. Profiles code performance
    2. Identifies bottlenecks
    3. Generates optimizations
    4. Measures improvements
    5. Validates optimizations

    NOTE(review): the helper coroutines used in reason() (_profile_performance,
    _identify_bottlenecks, _generate_optimizations, _measure_improvements)
    are not defined in this class and are not visible in this file — confirm
    they are provided elsewhere.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Store optional configuration; defaults to an empty dict."""
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Optimize code performance.

        'success' mirrors the measurement step's own success flag.
        """
        try:
            # Profile performance
            profile = await self._profile_performance(query, context)

            # Identify bottlenecks
            bottlenecks = await self._identify_bottlenecks(profile, context)

            # Generate optimizations
            optimizations = await self._generate_optimizations(bottlenecks, context)

            # Measure improvements
            measurements = await self._measure_improvements(optimizations, context)

            return {
                "success": measurements["success"],
                "profile": profile,
                "bottlenecks": bottlenecks,
                "optimizations": optimizations,
                "improvements": measurements
            }
        except Exception as e:
            logging.error(f"Error in performance optimization: {str(e)}")
            return {"success": False, "error": str(e)}
|
266 |
+
|
267 |
+
class TestGenerationStrategy(ReasoningStrategy):
    """
    Advanced test generation strategy that:
    1. Analyzes code coverage
    2. Generates test cases
    3. Creates test fixtures
    4. Validates test quality
    5. Maintains test suite

    NOTE(review): the helper coroutines used in reason() (_analyze_coverage,
    _generate_test_cases, _create_fixtures, _validate_tests) are not defined
    in this class and are not visible in this file — confirm they are
    provided elsewhere.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Store optional configuration; defaults to an empty dict."""
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate comprehensive test suite."""
        try:
            # Analyze coverage
            coverage = await self._analyze_coverage(query, context)

            # Generate test cases
            test_cases = await self._generate_test_cases(coverage, context)

            # Create fixtures
            fixtures = await self._create_fixtures(test_cases, context)

            # Validate tests
            validation = await self._validate_tests(test_cases, fixtures, context)

            return {
                "success": validation["success"],
                "test_cases": test_cases,
                "fixtures": fixtures,
                "validation": validation,
                "metrics": {
                    "coverage": coverage.get("percentage", 0.0),
                    "quality_score": validation.get("quality_score", 0.0)
                }
            }
        except Exception as e:
            logging.error(f"Error in test generation: {str(e)}")
            return {"success": False, "error": str(e)}
|
309 |
+
|
310 |
+
class DocumentationStrategy(ReasoningStrategy):
    """
    Advanced documentation strategy that:
    1. Analyzes code structure
    2. Generates documentation
    3. Maintains consistency
    4. Updates references
    5. Validates completeness

    NOTE(review): the helper coroutines used in reason() (_analyze_structure,
    _generate_documentation, _update_references, _validate_documentation)
    are not defined in this class and are not visible in this file — confirm
    they are provided elsewhere.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Store optional configuration; defaults to an empty dict."""
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate and maintain documentation."""
        try:
            # Analyze structure
            structure = await self._analyze_structure(query, context)

            # Generate documentation
            documentation = await self._generate_documentation(structure, context)

            # Update references
            references = await self._update_references(documentation, context)

            # Validate completeness
            validation = await self._validate_documentation(documentation, references, context)

            return {
                "success": validation["success"],
                "documentation": documentation,
                "references": references,
                "validation": validation,
                "metrics": {
                    "completeness": validation.get("completeness_score", 0.0),
                    "consistency": validation.get("consistency_score", 0.0)
                }
            }
        except Exception as e:
            logging.error(f"Error in documentation: {str(e)}")
            return {"success": False, "error": str(e)}
|
352 |
+
|
353 |
+
class APIDesignStrategy(ReasoningStrategy):
    """Strategy that designs an API and validates the resulting spec.

    Pipeline: requirements analysis -> structure design -> spec
    generation -> design validation.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Design and validate API."""
        try:
            reqs = await self._analyze_requirements(query, context)
            blueprint = await self._design_structure(reqs, context)
            spec_docs = await self._generate_specs(blueprint, context)
            verdict = await self._validate_design(spec_docs, context)

            return {
                "success": verdict["success"],
                "requirements": reqs,
                "design": blueprint,
                "specs": spec_docs,
                "validation": verdict,
            }
        except Exception as e:
            logging.error(f"Error in API design: {str(e)}")
            return {"success": False, "error": str(e)}
|
392 |
+
|
393 |
+
class DependencyManagementStrategy(ReasoningStrategy):
    """Strategy that analyzes, reconciles and optimizes dependencies.

    Pipeline: dependency analysis -> conflict resolution -> version
    optimization -> compatibility validation.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Manage and optimize dependencies."""
        try:
            report = await self._analyze_dependencies(query, context)
            resolved = await self._resolve_conflicts(report, context)
            tuned = await self._optimize_versions(resolved, context)
            verdict = await self._validate_compatibility(tuned, context)

            return {
                "success": verdict["success"],
                "analysis": report,
                "resolution": resolved,
                "optimization": tuned,
                "validation": verdict,
            }
        except Exception as e:
            logging.error(f"Error in dependency management: {str(e)}")
            return {"success": False, "error": str(e)}
|
432 |
+
|
433 |
+
class CodeReviewStrategy(ReasoningStrategy):
    """Strategy that performs an end-to-end code review.

    Pipeline: quality analysis -> issue identification -> suggestion
    generation -> change tracking. Unlike the sibling strategies, the
    happy path always reports ``success: True``.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        super().__init__()
        self.config = config or {}

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Perform comprehensive code review."""
        try:
            quality_report = await self._analyze_quality(query, context)
            found_issues = await self._identify_issues(quality_report, context)
            advice = await self._generate_suggestions(found_issues, context)
            change_log = await self._track_changes(advice, context)

            return {
                "success": True,
                "quality": quality_report,
                "issues": found_issues,
                "suggestions": advice,
                "tracking": change_log,
                "metrics": {
                    "quality_score": quality_report.get("score", 0.0),
                    "issues_found": len(found_issues),
                    "suggestions_made": len(advice),
                },
            }
        except Exception as e:
            logging.error(f"Error in code review: {str(e)}")
            return {"success": False, "error": str(e)}
|
reasoning/tree_of_thoughts.py
ADDED
@@ -0,0 +1,516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tree of Thoughts reasoning implementation with advanced tree exploration."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from enum import Enum
|
8 |
+
import heapq
|
9 |
+
from collections import defaultdict
|
10 |
+
|
11 |
+
from .base import ReasoningStrategy
|
12 |
+
|
13 |
+
class NodeType(Enum):
    """Types of nodes in the thought tree."""
    ROOT = "root"              # tree entry point holding the original query
    HYPOTHESIS = "hypothesis"  # candidate idea to explore
    EVIDENCE = "evidence"      # supporting/refuting information
    ANALYSIS = "analysis"      # examination of prior thoughts
    SYNTHESIS = "synthesis"    # combination of thoughts into a new one
    EVALUATION = "evaluation"  # assessment/scoring step
    CONCLUSION = "conclusion"  # terminal summary of a reasoning path
|
22 |
+
|
23 |
+
@dataclass
class TreeNode:
    """Represents a node in the thought tree."""
    # Unique id; children are named "<parent_id>_<index>" (see _parse_child_nodes).
    id: str
    # Role of this node in the reasoning tree.
    type: NodeType
    # The thought text itself.
    content: str
    # Model-reported confidence in this thought (0-1 by convention).
    confidence: float
    # Child thoughts attached during tree expansion.
    children: List['TreeNode']
    # None only for the root node.
    parent: Optional['TreeNode']
    # Free-form extras (rationale, potential, per-criterion scores, ...).
    metadata: Dict[str, Any]
    # Distance from the root (root is 0).
    depth: int
    # Beam-search score; later scaled by path-level scores in _evaluate_paths.
    evaluation_score: float = 0.0
|
35 |
+
|
36 |
+
class TreeOfThoughtsStrategy(ReasoningStrategy):
    """
    Advanced Tree of Thoughts reasoning implementation with:
    - Beam search for path exploration
    - Dynamic node evaluation
    - Pruning strategies
    - Path optimization
    - Meta-learning from tree patterns
    """

    def __init__(self,
                 min_confidence: float = 0.7,
                 parallel_threshold: int = 3,
                 learning_rate: float = 0.1,
                 strategy_weights: Optional[Dict[str, float]] = None):
        # NOTE(review): min_confidence, parallel_threshold and learning_rate
        # are stored but not read by any method visible in this class —
        # confirm they are consumed by callers or remove them.
        self.min_confidence = min_confidence
        self.parallel_threshold = parallel_threshold
        self.learning_rate = learning_rate
        # Default relative weights for cooperating strategies.
        self.strategy_weights = strategy_weights or {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        }
        # Explored nodes keyed by node id (populated by _update_history).
        self.node_history: Dict[str, TreeNode] = {}
        # Accumulated score per "type->type->..." path signature.
        self.path_patterns: Dict[str, float] = defaultdict(float)
|
62 |
+
|
63 |
+
async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
|
64 |
+
"""Main reasoning method implementing tree of thoughts."""
|
65 |
+
try:
|
66 |
+
# Initialize root node
|
67 |
+
root = await self._create_root_node(query, context)
|
68 |
+
|
69 |
+
# Build and explore tree
|
70 |
+
tree = await self._build_tree(root, context)
|
71 |
+
|
72 |
+
# Find best paths
|
73 |
+
paths = await self._find_best_paths(tree, context)
|
74 |
+
|
75 |
+
# Synthesize conclusion
|
76 |
+
conclusion = await self._synthesize_conclusion(paths, context)
|
77 |
+
|
78 |
+
# Update history and patterns
|
79 |
+
self._update_history(tree)
|
80 |
+
self._update_patterns(paths)
|
81 |
+
|
82 |
+
return {
|
83 |
+
"success": True,
|
84 |
+
"answer": conclusion["answer"],
|
85 |
+
"confidence": conclusion["confidence"],
|
86 |
+
"tree": self._tree_to_dict(tree),
|
87 |
+
"best_paths": [self._path_to_dict(p) for p in paths],
|
88 |
+
"reasoning_trace": conclusion["trace"],
|
89 |
+
"meta_insights": conclusion["meta_insights"]
|
90 |
+
}
|
91 |
+
except Exception as e:
|
92 |
+
logging.error(f"Error in tree of thoughts reasoning: {str(e)}")
|
93 |
+
return {"success": False, "error": str(e)}
|
94 |
+
|
95 |
+
    async def _create_root_node(self, query: str, context: Dict[str, Any]) -> TreeNode:
        """Create the root node of the thought tree.

        Asks the model (expected under ``context["groq_api"]`` — raises
        KeyError if absent, caught by ``reason``) to decompose the query,
        then parses the reply into a ROOT ``TreeNode``.
        """
        prompt = f"""
        Initialize root thought node for query:
        Query: {query}
        Context: {json.dumps(context)}

        Provide:
        1. Initial problem decomposition
        2. Key aspects to explore
        3. Evaluation criteria
        4. Success metrics

        Format as:
        [Root]
        Decomposition: ...
        Aspects: ...
        Criteria: ...
        Metrics: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_root_node(response["answer"], query)
|
118 |
+
|
119 |
+
async def _build_tree(self, root: TreeNode, context: Dict[str, Any]) -> TreeNode:
|
120 |
+
"""Build and explore the thought tree."""
|
121 |
+
# Initialize beam with root
|
122 |
+
beam = [(root.evaluation_score, root)]
|
123 |
+
visited: Set[str] = set()
|
124 |
+
|
125 |
+
for depth in range(5):
|
126 |
+
next_beam = []
|
127 |
+
|
128 |
+
for _, node in beam:
|
129 |
+
if node.id in visited:
|
130 |
+
continue
|
131 |
+
|
132 |
+
visited.add(node.id)
|
133 |
+
|
134 |
+
# Generate child nodes
|
135 |
+
children = await self._generate_children(node, context)
|
136 |
+
|
137 |
+
# Evaluate and filter children
|
138 |
+
evaluated_children = await self._evaluate_nodes(children, context)
|
139 |
+
|
140 |
+
# Add to beam
|
141 |
+
for child in evaluated_children:
|
142 |
+
if child.evaluation_score > 0.4:
|
143 |
+
next_beam.append((child.evaluation_score, child))
|
144 |
+
node.children.append(child)
|
145 |
+
|
146 |
+
# Select best nodes for next iteration
|
147 |
+
beam = heapq.nlargest(3, next_beam, key=lambda x: x[0])
|
148 |
+
|
149 |
+
if not beam:
|
150 |
+
break
|
151 |
+
|
152 |
+
return root
|
153 |
+
|
154 |
+
    async def _generate_children(self, parent: TreeNode, context: Dict[str, Any]) -> List[TreeNode]:
        """Generate child nodes for a given parent.

        Prompts the model for candidate follow-up thoughts (any node type
        except ROOT) and parses them via ``_parse_child_nodes``.
        """
        prompt = f"""
        Generate child thoughts for node:
        Parent: {json.dumps(self._node_to_dict(parent))}
        Context: {json.dumps(context)}

        For each child provide:
        1. [Type]: {" | ".join([t.value for t in NodeType if t != NodeType.ROOT])}
        2. [Content]: Main thought
        3. [Confidence]: 0-1 score
        4. [Rationale]: Why this follows from parent
        5. [Potential]: Future exploration potential

        Format as:
        [C1]
        Type: ...
        Content: ...
        Confidence: ...
        Rationale: ...
        Potential: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_child_nodes(response["answer"], parent)
|
179 |
+
|
180 |
+
    async def _evaluate_nodes(self, nodes: List[TreeNode], context: Dict[str, Any]) -> List[TreeNode]:
        """Evaluate a list of nodes.

        Asks the model to score each node on coherence / evidence /
        novelty / potential, then copies the parsed scores back onto the
        nodes via ``_apply_evaluations``.
        """
        prompt = f"""
        Evaluate thought nodes:
        Nodes: {json.dumps([self._node_to_dict(n) for n in nodes])}
        Context: {json.dumps(context)}

        For each node evaluate:
        1. Logical coherence
        2. Evidence support
        3. Novelty value
        4. Exploration potential

        Format as:
        [N1]
        Coherence: 0-1
        Evidence: 0-1
        Novelty: 0-1
        Potential: 0-1
        Overall: 0-1
        """

        response = await context["groq_api"].predict(prompt)
        return self._apply_evaluations(nodes, response["answer"])
|
204 |
+
|
205 |
+
async def _find_best_paths(self, root: TreeNode, context: Dict[str, Any]) -> List[List[TreeNode]]:
|
206 |
+
"""Find the best paths through the tree."""
|
207 |
+
paths = []
|
208 |
+
current_path = [root]
|
209 |
+
|
210 |
+
def dfs(node: TreeNode, path: List[TreeNode]):
|
211 |
+
if not node.children:
|
212 |
+
paths.append(path[:])
|
213 |
+
return
|
214 |
+
|
215 |
+
# Sort children by score
|
216 |
+
sorted_children = sorted(node.children, key=lambda x: x.evaluation_score, reverse=True)
|
217 |
+
|
218 |
+
# Explore top paths
|
219 |
+
for child in sorted_children[:3]:
|
220 |
+
path.append(child)
|
221 |
+
dfs(child, path)
|
222 |
+
path.pop()
|
223 |
+
|
224 |
+
dfs(root, current_path)
|
225 |
+
|
226 |
+
# Evaluate complete paths
|
227 |
+
evaluated_paths = await self._evaluate_paths(paths, context)
|
228 |
+
|
229 |
+
# Return top paths
|
230 |
+
return sorted(evaluated_paths, key=lambda p: sum(n.evaluation_score for n in p), reverse=True)[:3]
|
231 |
+
|
232 |
+
    async def _synthesize_conclusion(self, paths: List[List[TreeNode]], context: Dict[str, Any]) -> Dict[str, Any]:
        """Synthesize final conclusion from best paths.

        Serializes the candidate paths into a prompt and parses the
        model's "[Conclusion]" / "[Meta]" sections via ``_parse_conclusion``.
        """
        prompt = f"""
        Synthesize conclusion from thought paths:
        Paths: {json.dumps([[self._node_to_dict(n) for n in path] for path in paths])}
        Context: {json.dumps(context)}

        Provide:
        1. Main conclusion
        2. Confidence level
        3. Reasoning trace
        4. Supporting evidence
        5. Alternative perspectives
        6. Meta-insights

        Format as:
        [Conclusion]
        Answer: ...
        Confidence: ...
        Trace: ...
        Evidence: ...
        Alternatives: ...

        [Meta]
        Insights: ...
        Patterns: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_conclusion(response["answer"])
|
262 |
+
|
263 |
+
def _parse_root_node(self, response: str, query: str) -> TreeNode:
|
264 |
+
"""Parse root node from response."""
|
265 |
+
root = TreeNode(
|
266 |
+
id="root",
|
267 |
+
type=NodeType.ROOT,
|
268 |
+
content=query,
|
269 |
+
confidence=1.0,
|
270 |
+
children=[],
|
271 |
+
parent=None,
|
272 |
+
metadata={},
|
273 |
+
depth=0
|
274 |
+
)
|
275 |
+
|
276 |
+
for line in response.split('\n'):
|
277 |
+
line = line.strip()
|
278 |
+
if line.startswith('Decomposition:'):
|
279 |
+
root.metadata["decomposition"] = line[14:].strip()
|
280 |
+
elif line.startswith('Aspects:'):
|
281 |
+
root.metadata["aspects"] = [a.strip() for a in line[8:].split(',')]
|
282 |
+
elif line.startswith('Criteria:'):
|
283 |
+
root.metadata["criteria"] = [c.strip() for c in line[9:].split(',')]
|
284 |
+
elif line.startswith('Metrics:'):
|
285 |
+
root.metadata["metrics"] = [m.strip() for m in line[8:].split(',')]
|
286 |
+
|
287 |
+
return root
|
288 |
+
|
289 |
+
def _parse_child_nodes(self, response: str, parent: TreeNode) -> List[TreeNode]:
|
290 |
+
"""Parse child nodes from response."""
|
291 |
+
children = []
|
292 |
+
current = None
|
293 |
+
|
294 |
+
for line in response.split('\n'):
|
295 |
+
line = line.strip()
|
296 |
+
if not line:
|
297 |
+
continue
|
298 |
+
|
299 |
+
if line.startswith('[C'):
|
300 |
+
if current:
|
301 |
+
children.append(current)
|
302 |
+
current = None
|
303 |
+
elif line.startswith('Type:'):
|
304 |
+
type_str = line[5:].strip()
|
305 |
+
try:
|
306 |
+
node_type = NodeType(type_str.lower())
|
307 |
+
current = TreeNode(
|
308 |
+
id=f"{parent.id}_{len(children)}",
|
309 |
+
type=node_type,
|
310 |
+
content="",
|
311 |
+
confidence=0.0,
|
312 |
+
children=[],
|
313 |
+
parent=parent,
|
314 |
+
metadata={},
|
315 |
+
depth=parent.depth + 1
|
316 |
+
)
|
317 |
+
except ValueError:
|
318 |
+
logging.warning(f"Invalid node type: {type_str}")
|
319 |
+
elif current:
|
320 |
+
if line.startswith('Content:'):
|
321 |
+
current.content = line[8:].strip()
|
322 |
+
elif line.startswith('Confidence:'):
|
323 |
+
try:
|
324 |
+
current.confidence = float(line[11:].strip())
|
325 |
+
except:
|
326 |
+
current.confidence = 0.5
|
327 |
+
elif line.startswith('Rationale:'):
|
328 |
+
current.metadata["rationale"] = line[10:].strip()
|
329 |
+
elif line.startswith('Potential:'):
|
330 |
+
current.metadata["potential"] = line[10:].strip()
|
331 |
+
|
332 |
+
if current:
|
333 |
+
children.append(current)
|
334 |
+
|
335 |
+
return children
|
336 |
+
|
337 |
+
def _apply_evaluations(self, nodes: List[TreeNode], response: str) -> List[TreeNode]:
|
338 |
+
"""Apply evaluation scores to nodes."""
|
339 |
+
current_node_idx = 0
|
340 |
+
current_scores = {}
|
341 |
+
|
342 |
+
for line in response.split('\n'):
|
343 |
+
line = line.strip()
|
344 |
+
if not line:
|
345 |
+
continue
|
346 |
+
|
347 |
+
if line.startswith('[N'):
|
348 |
+
if current_scores and current_node_idx < len(nodes):
|
349 |
+
nodes[current_node_idx].evaluation_score = current_scores.get("Overall", 0.0)
|
350 |
+
nodes[current_node_idx].metadata.update(current_scores)
|
351 |
+
current_node_idx += 1
|
352 |
+
current_scores = {}
|
353 |
+
elif ':' in line:
|
354 |
+
key, value = line.split(':')
|
355 |
+
try:
|
356 |
+
current_scores[key.strip()] = float(value.strip())
|
357 |
+
except:
|
358 |
+
pass
|
359 |
+
|
360 |
+
if current_scores and current_node_idx < len(nodes):
|
361 |
+
nodes[current_node_idx].evaluation_score = current_scores.get("Overall", 0.0)
|
362 |
+
nodes[current_node_idx].metadata.update(current_scores)
|
363 |
+
|
364 |
+
return nodes
|
365 |
+
|
366 |
+
    async def _evaluate_paths(self, paths: List[List[TreeNode]], context: Dict[str, Any]) -> List[List[TreeNode]]:
        """Evaluate complete reasoning paths.

        Asks the model to score each path as a whole, then scales every
        node's ``evaluation_score`` by its path's overall score (paths
        beyond the number of parsed scores are left unchanged).
        """
        prompt = f"""
        Evaluate complete reasoning paths:
        Paths: {json.dumps([[self._node_to_dict(n) for n in path] for path in paths])}
        Context: {json.dumps(context)}

        For each path evaluate:
        1. Coherence of progression
        2. Evidence support
        3. Conclusion strength
        4. Novel insights

        Format as:
        [P1]
        Coherence: 0-1
        Evidence: 0-1
        Conclusion: 0-1
        Insights: 0-1
        Overall: 0-1
        """

        response = await context["groq_api"].predict(prompt)
        scores = self._parse_path_scores(response["answer"])

        # Apply scores to paths: multiply each node by its path-level score.
        for i, path in enumerate(paths):
            if i < len(scores):
                for node in path:
                    node.evaluation_score *= scores[i]

        return paths
|
398 |
+
|
399 |
+
def _parse_path_scores(self, response: str) -> List[float]:
|
400 |
+
"""Parse path evaluation scores."""
|
401 |
+
scores = []
|
402 |
+
current_score = None
|
403 |
+
|
404 |
+
for line in response.split('\n'):
|
405 |
+
line = line.strip()
|
406 |
+
if not line:
|
407 |
+
continue
|
408 |
+
|
409 |
+
if line.startswith('[P'):
|
410 |
+
if current_score is not None:
|
411 |
+
scores.append(current_score)
|
412 |
+
current_score = None
|
413 |
+
elif line.startswith('Overall:'):
|
414 |
+
try:
|
415 |
+
current_score = float(line[8:].strip())
|
416 |
+
except:
|
417 |
+
current_score = 0.5
|
418 |
+
|
419 |
+
if current_score is not None:
|
420 |
+
scores.append(current_score)
|
421 |
+
|
422 |
+
return scores
|
423 |
+
|
424 |
+
def _parse_conclusion(self, response: str) -> Dict[str, Any]:
|
425 |
+
"""Parse final conclusion."""
|
426 |
+
conclusion = {
|
427 |
+
"answer": "",
|
428 |
+
"confidence": 0.0,
|
429 |
+
"trace": [],
|
430 |
+
"evidence": [],
|
431 |
+
"alternatives": [],
|
432 |
+
"meta_insights": []
|
433 |
+
}
|
434 |
+
|
435 |
+
section = None
|
436 |
+
for line in response.split('\n'):
|
437 |
+
line = line.strip()
|
438 |
+
if not line:
|
439 |
+
continue
|
440 |
+
|
441 |
+
if line.startswith('[Conclusion]'):
|
442 |
+
section = "conclusion"
|
443 |
+
elif line.startswith('[Meta]'):
|
444 |
+
section = "meta"
|
445 |
+
elif section == "conclusion":
|
446 |
+
if line.startswith('Answer:'):
|
447 |
+
conclusion["answer"] = line[7:].strip()
|
448 |
+
elif line.startswith('Confidence:'):
|
449 |
+
try:
|
450 |
+
conclusion["confidence"] = float(line[11:].strip())
|
451 |
+
except:
|
452 |
+
conclusion["confidence"] = 0.5
|
453 |
+
elif line.startswith('Trace:'):
|
454 |
+
conclusion["trace"] = [t.strip() for t in line[6:].split(',')]
|
455 |
+
elif line.startswith('Evidence:'):
|
456 |
+
conclusion["evidence"] = [e.strip() for e in line[9:].split(',')]
|
457 |
+
elif line.startswith('Alternatives:'):
|
458 |
+
conclusion["alternatives"] = [a.strip() for a in line[13:].split(',')]
|
459 |
+
elif section == "meta":
|
460 |
+
if line.startswith('Insights:'):
|
461 |
+
conclusion["meta_insights"].extend([i.strip() for i in line[9:].split(',')])
|
462 |
+
|
463 |
+
return conclusion
|
464 |
+
|
465 |
+
def _node_to_dict(self, node: TreeNode) -> Dict[str, Any]:
|
466 |
+
"""Convert node to dictionary for serialization."""
|
467 |
+
return {
|
468 |
+
"id": node.id,
|
469 |
+
"type": node.type.value,
|
470 |
+
"content": node.content,
|
471 |
+
"confidence": node.confidence,
|
472 |
+
"evaluation_score": node.evaluation_score,
|
473 |
+
"metadata": node.metadata,
|
474 |
+
"depth": node.depth
|
475 |
+
}
|
476 |
+
|
477 |
+
def _tree_to_dict(self, root: TreeNode) -> Dict[str, Any]:
|
478 |
+
"""Convert entire tree to dictionary."""
|
479 |
+
def convert_node(node: TreeNode) -> Dict[str, Any]:
|
480 |
+
node_dict = self._node_to_dict(node)
|
481 |
+
node_dict["children"] = [convert_node(c) for c in node.children]
|
482 |
+
return node_dict
|
483 |
+
|
484 |
+
return convert_node(root)
|
485 |
+
|
486 |
+
def _path_to_dict(self, path: List[TreeNode]) -> List[Dict[str, Any]]:
|
487 |
+
"""Convert path to dictionary."""
|
488 |
+
return [self._node_to_dict(n) for n in path]
|
489 |
+
|
490 |
+
def _update_history(self, root: TreeNode):
|
491 |
+
"""Update node history."""
|
492 |
+
def add_to_history(node: TreeNode):
|
493 |
+
self.node_history[node.id] = node
|
494 |
+
for child in node.children:
|
495 |
+
add_to_history(child)
|
496 |
+
|
497 |
+
add_to_history(root)
|
498 |
+
|
499 |
+
def _update_patterns(self, paths: List[List[TreeNode]]):
|
500 |
+
"""Update path patterns."""
|
501 |
+
for path in paths:
|
502 |
+
pattern = "->".join(n.type.value for n in path)
|
503 |
+
self.path_patterns[pattern] += path[-1].evaluation_score
|
504 |
+
|
505 |
+
def get_node_history(self) -> Dict[str, Dict[str, Any]]:
|
506 |
+
"""Get history of all nodes."""
|
507 |
+
return {k: self._node_to_dict(v) for k, v in self.node_history.items()}
|
508 |
+
|
509 |
+
def get_successful_patterns(self) -> Dict[str, float]:
|
510 |
+
"""Get successful reasoning patterns."""
|
511 |
+
return dict(sorted(self.path_patterns.items(), key=lambda x: x[1], reverse=True))
|
512 |
+
|
513 |
+
def clear_history(self):
|
514 |
+
"""Clear node history and patterns."""
|
515 |
+
self.node_history.clear()
|
516 |
+
self.path_patterns.clear()
|
reasoning/unified_engine.py
ADDED
@@ -0,0 +1,707 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Unified reasoning engine that combines multiple reasoning strategies."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import asyncio
|
10 |
+
from collections import defaultdict
|
11 |
+
import numpy as np
|
12 |
+
|
13 |
+
from .base import ReasoningStrategy
|
14 |
+
from .groq_strategy import GroqStrategy
|
15 |
+
from .chain_of_thought import ChainOfThoughtStrategy
|
16 |
+
from .tree_of_thoughts import TreeOfThoughtsStrategy
|
17 |
+
from .meta_learning import MetaLearningStrategy
|
18 |
+
from .recursive import RecursiveReasoning
|
19 |
+
from .analogical import AnalogicalReasoning
|
20 |
+
from .local_llm import LocalLLMStrategy
|
21 |
+
from .agentic import (
|
22 |
+
TaskDecompositionStrategy,
|
23 |
+
ResourceManagementStrategy,
|
24 |
+
ContextualPlanningStrategy,
|
25 |
+
AdaptiveExecutionStrategy,
|
26 |
+
FeedbackIntegrationStrategy
|
27 |
+
)
|
28 |
+
# Import additional strategies
|
29 |
+
from .bayesian import BayesianStrategy
|
30 |
+
from .market_analysis import MarketAnalysisStrategy
|
31 |
+
from .monetization import MonetizationStrategy
|
32 |
+
from .multimodal import MultimodalStrategy
|
33 |
+
from .neurosymbolic import NeurosymbolicStrategy
|
34 |
+
from .portfolio_optimization import PortfolioOptimizationStrategy
|
35 |
+
from .specialized import SpecializedStrategy
|
36 |
+
from .venture_strategies import VentureStrategy
|
37 |
+
from .venture_types import VentureTypeStrategy
|
38 |
+
|
39 |
+
class StrategyType(str, Enum):
    """Types of reasoning strategies."""
    # Primary strategy (Groq-hosted LLM).
    GROQ = "groq"
    # Core reasoning strategies.
    CHAIN_OF_THOUGHT = "chain_of_thought"
    TREE_OF_THOUGHTS = "tree_of_thoughts"
    META_LEARNING = "meta_learning"
    RECURSIVE = "recursive"
    ANALOGICAL = "analogical"
    LOCAL_LLM = "local_llm"
    # Agentic strategies.
    TASK_DECOMPOSITION = "task_decomposition"
    RESOURCE_MANAGEMENT = "resource_management"
    CONTEXTUAL_PLANNING = "contextual_planning"
    ADAPTIVE_EXECUTION = "adaptive_execution"
    FEEDBACK_INTEGRATION = "feedback_integration"
    # Additional specialized/domain strategies.
    BAYESIAN = "bayesian"
    MARKET_ANALYSIS = "market_analysis"
    MONETIZATION = "monetization"
    MULTIMODAL = "multimodal"
    NEUROSYMBOLIC = "neurosymbolic"
    PORTFOLIO_OPTIMIZATION = "portfolio_optimization"
    SPECIALIZED = "specialized"
    VENTURE = "venture"
    VENTURE_TYPE = "venture_type"
|
62 |
+
|
63 |
+
@dataclass
class StrategyResult:
    """Result from a reasoning strategy."""
    # Which strategy produced this result.
    strategy_type: StrategyType
    # Whether the strategy completed successfully.
    success: bool
    # Answer text, or None when no answer was produced.
    answer: Optional[str]
    # Strategy-reported confidence (presumably 0-1 — confirm per strategy).
    confidence: float
    # Ordered trace entries describing the reasoning steps.
    reasoning_trace: List[Dict[str, Any]]
    # Strategy-specific extra data.
    metadata: Dict[str, Any]
    # Performance data (contents are strategy-specific).
    performance_metrics: Dict[str, Any]
    # Creation time; defaults to "now" at construction.
    timestamp: datetime = field(default_factory=datetime.now)
|
74 |
+
|
75 |
+
@dataclass
class UnifiedResult:
    """Combined result from multiple strategies."""
    # Whether the engine produced a usable synthesized answer.
    success: bool
    # Final synthesized answer text.
    answer: str
    # Overall confidence in the synthesized answer.
    confidence: float
    # Per-strategy results, keyed by strategy type.
    strategy_results: Dict[StrategyType, StrategyResult]
    # Name of the method used to combine strategy outputs.
    synthesis_method: str
    # Cross-strategy observations produced during synthesis.
    meta_insights: List[str]
    # Engine-level performance data (contents engine-specific).
    performance_metrics: Dict[str, Any]
    # Creation time; defaults to "now" at construction.
    timestamp: datetime = field(default_factory=datetime.now)
|
86 |
+
|
87 |
+
class UnifiedReasoningEngine:
    """
    Advanced unified reasoning engine that:
    1. Combines multiple reasoning strategies
    2. Dynamically selects and weights strategies
    3. Synthesizes results from different approaches
    4. Learns from experience
    5. Adapts to different types of tasks
    """

    def __init__(self,
                 min_confidence: float = 0.7,
                 strategy_weights: Optional[Dict[StrategyType, float]] = None,
                 parallel_threshold: int = 3,
                 learning_rate: float = 0.1):
        """
        Args:
            min_confidence: Minimum confidence for a synthesized answer to count as success.
            strategy_weights: Optional override of the default per-strategy weights.
            parallel_threshold: Maximum number of strategies executed in parallel per query.
            learning_rate: EMA rate used when updating strategy weights from outcomes.
        """
        self.min_confidence = min_confidence
        self.parallel_threshold = parallel_threshold
        self.learning_rate = learning_rate

        # Initialize strategies
        self.strategies: Dict[StrategyType, ReasoningStrategy] = {
            # Primary strategy (Groq)
            StrategyType.GROQ: GroqStrategy(),

            # Core strategies
            StrategyType.CHAIN_OF_THOUGHT: ChainOfThoughtStrategy(),
            StrategyType.TREE_OF_THOUGHTS: TreeOfThoughtsStrategy(),
            StrategyType.META_LEARNING: MetaLearningStrategy(),
            StrategyType.RECURSIVE: RecursiveReasoning(),
            StrategyType.ANALOGICAL: AnalogicalReasoning(),
            StrategyType.LOCAL_LLM: LocalLLMStrategy(),

            # Agentic strategies
            StrategyType.TASK_DECOMPOSITION: TaskDecompositionStrategy(),
            StrategyType.RESOURCE_MANAGEMENT: ResourceManagementStrategy(),
            StrategyType.CONTEXTUAL_PLANNING: ContextualPlanningStrategy(),
            StrategyType.ADAPTIVE_EXECUTION: AdaptiveExecutionStrategy(),
            StrategyType.FEEDBACK_INTEGRATION: FeedbackIntegrationStrategy(),

            # Additional specialized strategies
            StrategyType.BAYESIAN: BayesianStrategy(),
            StrategyType.MARKET_ANALYSIS: MarketAnalysisStrategy(),
            StrategyType.MONETIZATION: MonetizationStrategy(),
            StrategyType.MULTIMODAL: MultimodalStrategy(),
            StrategyType.NEUROSYMBOLIC: NeurosymbolicStrategy(),
            StrategyType.PORTFOLIO_OPTIMIZATION: PortfolioOptimizationStrategy(),
            StrategyType.SPECIALIZED: SpecializedStrategy(),
            StrategyType.VENTURE: VentureStrategy(),
            StrategyType.VENTURE_TYPE: VentureTypeStrategy()
        }

        # Strategy weights with Groq as primary
        self.strategy_weights = strategy_weights or {
            # Primary strategy (highest weight)
            StrategyType.GROQ: 2.5,

            # Core strategies (high weights)
            StrategyType.CHAIN_OF_THOUGHT: 1.5,
            StrategyType.TREE_OF_THOUGHTS: 1.5,
            StrategyType.META_LEARNING: 1.5,

            # Agentic strategies (medium-high weights)
            StrategyType.TASK_DECOMPOSITION: 1.3,
            StrategyType.RESOURCE_MANAGEMENT: 1.3,
            StrategyType.CONTEXTUAL_PLANNING: 1.3,
            StrategyType.ADAPTIVE_EXECUTION: 1.3,
            StrategyType.FEEDBACK_INTEGRATION: 1.3,

            # Domain-specific strategies (context-dependent weights)
            StrategyType.BAYESIAN: 1.2,
            StrategyType.MARKET_ANALYSIS: 1.2,
            StrategyType.PORTFOLIO_OPTIMIZATION: 1.2,
            StrategyType.VENTURE: 1.2,

            # Other specialized strategies (base weights)
            StrategyType.MONETIZATION: 1.0,
            StrategyType.MULTIMODAL: 1.0,
            StrategyType.NEUROSYMBOLIC: 1.0,
            StrategyType.SPECIALIZED: 1.0,
            StrategyType.VENTURE_TYPE: 1.0,
            StrategyType.RECURSIVE: 1.0,
            StrategyType.ANALOGICAL: 1.0,
            StrategyType.LOCAL_LLM: 1.0  # Reduced weight since using Groq
        }

        # Performance tracking
        self.strategy_performance: Dict[StrategyType, List[float]] = defaultdict(list)
        self.task_type_performance: Dict[str, Dict[StrategyType, float]] = defaultdict(lambda: defaultdict(float))
        self.synthesis_performance: Dict[str, List[float]] = defaultdict(list)

    async def reason(self, query: str, context: Dict[str, Any]) -> UnifiedResult:
        """Main reasoning method combining multiple strategies.

        Pipeline: analyze task -> select strategies -> execute in parallel
        -> synthesize -> update learned weights. Never raises; failures are
        reported through an unsuccessful UnifiedResult.
        """
        try:
            # Analyze task
            task_analysis = await self._analyze_task(query, context)

            # Select strategies
            selected_strategies = await self._select_strategies(task_analysis, context)

            # Execute strategies
            strategy_results = await self._execute_strategies(
                selected_strategies, query, context)

            # Synthesize results
            unified_result = await self._synthesize_results(
                strategy_results, task_analysis, context)

            # Learn from experience
            self._update_performance(unified_result)

            return unified_result

        except Exception as e:
            logging.error(f"Error in unified reasoning: {str(e)}")
            return UnifiedResult(
                success=False,
                answer=f"Error: {str(e)}",
                confidence=0.0,
                strategy_results={},
                synthesis_method="failed",
                meta_insights=[f"Error occurred: {str(e)}"],
                performance_metrics={}
            )

    async def reason_stream(
        self,
        query: str,
        context: Dict[str, Any] = None,
        strategy_type: Optional[StrategyType] = None,
        chunk_handler: Optional[callable] = None
    ) -> AsyncGenerator[str, None]:
        """
        Stream reasoning results from the selected strategy.

        Args:
            query: Query to reason about
            context: Additional context for reasoning
            strategy_type: Specific strategy to use (optional; defaults to Groq)
            chunk_handler: Optional callback for handling chunks
        """
        context = context or {}

        # Default to Groq strategy for streaming
        if not strategy_type:
            strategy_type = StrategyType.GROQ

        strategy = self.strategies.get(strategy_type)
        if not strategy:
            yield f"Error: Strategy {strategy_type} not found"
            return

        if not hasattr(strategy, 'reason_stream'):
            yield f"Error: Strategy {strategy_type} does not support streaming"
            return

        try:
            async for chunk in strategy.reason_stream(
                query=query,
                context=context,
                chunk_handler=chunk_handler
            ):
                yield chunk
        except Exception as e:
            logging.error(f"Streaming error: {str(e)}")
            yield f"Error: {str(e)}"

    async def _analyze_task(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze the task via the Groq API to guide strategy selection.

        NOTE: requires ``context["groq_api"]`` to expose an async ``predict``;
        a missing key propagates as KeyError to the caller's handler.
        """
        prompt = f"""
        Analyze reasoning task:
        Query: {query}
        Context: {json.dumps(context)}

        Determine:
        1. Task type and complexity
        2. Required reasoning capabilities
        3. Resource requirements
        4. Success criteria
        5. Risk factors

        Format as:
        [Analysis]
        Type: ...
        Complexity: ...
        Capabilities: ...
        Resources: ...
        Criteria: ...
        Risks: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_task_analysis(response["answer"])

    async def _select_strategies(self, task_analysis: Dict[str, Any], context: Dict[str, Any]) -> List[StrategyType]:
        """Select appropriate strategies based on task analysis.

        Scores every strategy from base weight, per-task-type history, recent
        performance, and resource/capability match, then keeps the top
        ``parallel_threshold`` strategies.
        """
        # Calculate strategy scores
        scores: Dict[StrategyType, float] = {}
        for strategy_type in StrategyType:
            base_score = self.strategy_weights[strategy_type]

            # Task type performance
            task_type = task_analysis.get("type", "")
            type_score = self.task_type_performance[task_type][strategy_type]

            # Recent performance: average over the last (up to) 5 runs.
            # FIX: previously divided by a constant 5, understating the score
            # when fewer than 5 samples existed.
            recent = self.strategy_performance[strategy_type][-5:]
            recent_performance = sum(recent) / len(recent) if recent else 0.5

            # Resource match
            resource_match = self._calculate_resource_match(
                strategy_type, task_analysis.get("resources", {}))

            # Capability match
            capability_match = self._calculate_capability_match(
                strategy_type, task_analysis.get("capabilities", []))

            # Combined score (weights sum to 1.0)
            scores[strategy_type] = (
                0.3 * base_score +
                0.2 * type_score +
                0.2 * recent_performance +
                0.15 * resource_match +
                0.15 * capability_match
            )

        # Select top strategies
        selected = sorted(
            StrategyType,
            key=lambda x: scores[x],
            reverse=True
        )[:self.parallel_threshold]

        return selected

    async def _execute_strategies(self,
                                  strategies: List[StrategyType],
                                  query: str,
                                  context: Dict[str, Any]) -> Dict[StrategyType, StrategyResult]:
        """Execute selected strategies in parallel via asyncio.gather."""
        async def execute_strategy(strategy_type: StrategyType) -> StrategyResult:
            strategy = self.strategies[strategy_type]
            start_time = datetime.now()

            try:
                result = await strategy.reason(query, context)

                return StrategyResult(
                    strategy_type=strategy_type,
                    success=result.get("success", False),
                    answer=result.get("answer"),
                    confidence=result.get("confidence", 0.0),
                    reasoning_trace=result.get("reasoning_trace", []),
                    metadata=result.get("metadata", {}),
                    performance_metrics={
                        "execution_time": (datetime.now() - start_time).total_seconds(),
                        **result.get("performance_metrics", {})
                    }
                )
            except Exception as e:
                # A failing strategy must not abort the whole batch; record the
                # failure as an unsuccessful result instead.
                logging.error(f"Error in strategy {strategy_type}: {str(e)}")
                return StrategyResult(
                    strategy_type=strategy_type,
                    success=False,
                    answer=None,
                    confidence=0.0,
                    reasoning_trace=[{"error": str(e)}],
                    metadata={},
                    performance_metrics={"execution_time": (datetime.now() - start_time).total_seconds()}
                )

        # Execute strategies in parallel
        tasks = [execute_strategy(strategy) for strategy in strategies]
        results = await asyncio.gather(*tasks)

        return {result.strategy_type: result for result in results}

    async def _synthesize_results(self,
                                  strategy_results: Dict[StrategyType, StrategyResult],
                                  task_analysis: Dict[str, Any],
                                  context: Dict[str, Any]) -> UnifiedResult:
        """Synthesize results from multiple strategies with specialized combination methods."""
        if not strategy_results:
            return UnifiedResult(
                success=False,
                answer="No strategy results available",
                confidence=0.0,
                strategy_results={},
                synthesis_method="none",
                meta_insights=[],
                performance_metrics={}
            )

        # Group results by strategy category
        core_results = {k: v for k, v in strategy_results.items()
                        if k in {StrategyType.CHAIN_OF_THOUGHT, StrategyType.TREE_OF_THOUGHTS,
                                 StrategyType.META_LEARNING, StrategyType.LOCAL_LLM}}

        agentic_results = {k: v for k, v in strategy_results.items()
                           if k in {StrategyType.TASK_DECOMPOSITION, StrategyType.RESOURCE_MANAGEMENT,
                                    StrategyType.CONTEXTUAL_PLANNING, StrategyType.ADAPTIVE_EXECUTION,
                                    StrategyType.FEEDBACK_INTEGRATION}}

        market_results = {k: v for k, v in strategy_results.items()
                          if k in {StrategyType.MARKET_ANALYSIS, StrategyType.PORTFOLIO_OPTIMIZATION,
                                   StrategyType.VENTURE, StrategyType.MONETIZATION}}

        analytical_results = {k: v for k, v in strategy_results.items()
                              if k in {StrategyType.BAYESIAN, StrategyType.NEUROSYMBOLIC,
                                       StrategyType.SPECIALIZED, StrategyType.MULTIMODAL}}

        # Determine synthesis method based on task type and available results.
        # FIX: _parse_task_analysis stores the task type under "type"; the old
        # lookup used "task_type" and therefore always fell back to 'general',
        # making the specialized synthesis paths unreachable.
        task_type = task_analysis.get('type') or 'general'
        synthesis_method = self._determine_synthesis_method(task_type, strategy_results.keys())

        # Apply specialized synthesis based on method
        if synthesis_method == "weighted_voting":
            final_result = await self._weighted_voting_synthesis(strategy_results)
        elif synthesis_method == "market_focused":
            final_result = await self._market_focused_synthesis(market_results, core_results)
        elif synthesis_method == "analytical_consensus":
            final_result = await self._analytical_consensus_synthesis(analytical_results, core_results)
        elif synthesis_method == "agentic_orchestration":
            final_result = await self._agentic_orchestration_synthesis(agentic_results, strategy_results)
        else:
            final_result = await self._ensemble_synthesis(strategy_results)

        # Generate meta-insights about the synthesis process
        meta_insights = self._generate_meta_insights(strategy_results, synthesis_method)

        # Calculate aggregate performance metrics
        performance_metrics = self._calculate_synthesis_metrics(strategy_results, final_result)

        return UnifiedResult(
            success=final_result['success'],
            answer=final_result['answer'],
            confidence=final_result['confidence'],
            strategy_results=strategy_results,
            synthesis_method=synthesis_method,
            meta_insights=meta_insights,
            performance_metrics=performance_metrics
        )

    def _determine_synthesis_method(self, task_type: str, available_strategies: Set[StrategyType]) -> str:
        """Determine the best synthesis method based on task type and available strategies."""
        market_strategies = {StrategyType.MARKET_ANALYSIS, StrategyType.PORTFOLIO_OPTIMIZATION,
                             StrategyType.VENTURE, StrategyType.MONETIZATION}
        analytical_strategies = {StrategyType.BAYESIAN, StrategyType.NEUROSYMBOLIC}
        agentic_strategies = {StrategyType.TASK_DECOMPOSITION, StrategyType.RESOURCE_MANAGEMENT,
                              StrategyType.CONTEXTUAL_PLANNING}

        # Calculate strategy type coverage
        available = set(available_strategies)
        market_coverage = len(market_strategies & available)
        analytical_coverage = len(analytical_strategies & available)
        agentic_coverage = len(agentic_strategies & available)

        if task_type in ['market_analysis', 'investment'] and market_coverage >= 2:
            return "market_focused"
        elif task_type in ['analysis', 'prediction'] and analytical_coverage >= 2:
            return "analytical_consensus"
        elif task_type in ['planning', 'execution'] and agentic_coverage >= 2:
            return "agentic_orchestration"
        else:
            return "weighted_voting"

    async def _weighted_voting_synthesis(self, strategy_results: Dict[StrategyType, StrategyResult]) -> Dict[str, Any]:
        """Combine results using weighted voting based on strategy confidence and historical performance."""
        weighted_answers = defaultdict(float)
        total_weight = 0.0

        for strategy_type, result in strategy_results.items():
            # FIX: skip strategies that produced no answer (e.g. failed runs);
            # previously a None answer could win the vote and be returned as
            # the unified answer.
            if result.answer is None:
                continue

            # Weight = configured weight * this run's confidence * historical mean
            historical_performance = np.mean(self.strategy_performance[strategy_type]) if self.strategy_performance[strategy_type] else 1.0
            weight = self.strategy_weights[strategy_type] * result.confidence * historical_performance

            weighted_answers[result.answer] += weight
            total_weight += weight

        if not total_weight:
            return {'success': False, 'answer': '', 'confidence': 0.0}

        # Select answer with highest weighted votes
        best_answer = max(weighted_answers.items(), key=lambda x: x[1])
        confidence = best_answer[1] / total_weight

        return {
            'success': confidence >= self.min_confidence,
            'answer': best_answer[0],
            'confidence': confidence
        }

    async def _market_focused_synthesis(self, market_results: Dict[StrategyType, StrategyResult],
                                        core_results: Dict[StrategyType, StrategyResult]) -> Dict[str, Any]:
        """Synthesize results with emphasis on market-related strategies."""
        market_consensus = await self._weighted_voting_synthesis(market_results)
        core_consensus = await self._weighted_voting_synthesis(core_results)

        # Combine market and core insights with higher weight for market results
        if market_consensus['confidence'] >= self.min_confidence:
            return {
                'success': True,
                'answer': f"{market_consensus['answer']} (Supported by core analysis: {core_consensus['answer']})",
                'confidence': 0.7 * market_consensus['confidence'] + 0.3 * core_consensus['confidence']
            }
        else:
            return core_consensus

    async def _analytical_consensus_synthesis(self, analytical_results: Dict[StrategyType, StrategyResult],
                                              core_results: Dict[StrategyType, StrategyResult]) -> Dict[str, Any]:
        """Synthesize results with emphasis on analytical and probabilistic reasoning."""
        analytical_consensus = await self._weighted_voting_synthesis(analytical_results)
        core_consensus = await self._weighted_voting_synthesis(core_results)

        # Combine analytical and core insights with uncertainty quantification
        if analytical_consensus['confidence'] >= self.min_confidence:
            return {
                'success': True,
                'answer': f"{analytical_consensus['answer']} (Confidence interval: {analytical_consensus['confidence']:.2f})",
                'confidence': 0.6 * analytical_consensus['confidence'] + 0.4 * core_consensus['confidence']
            }
        else:
            return core_consensus

    async def _agentic_orchestration_synthesis(self, agentic_results: Dict[StrategyType, StrategyResult],
                                               all_results: Dict[StrategyType, StrategyResult]) -> Dict[str, Any]:
        """Synthesize results with emphasis on task decomposition and execution planning."""
        # Extract task decomposition and planning insights
        task_structure = self._extract_task_structure(agentic_results)
        execution_plan = self._create_execution_plan(task_structure, all_results)

        # Combine results according to the execution plan
        synthesized_result = self._execute_synthesis_plan(execution_plan, all_results)

        # FIX: _execute_synthesis_plan is currently a placeholder returning {};
        # the old code indexed ['confidence'] unconditionally and raised
        # KeyError whenever this synthesis path was taken. Fall back to
        # weighted voting when the plan yields nothing usable.
        if 'confidence' not in synthesized_result or 'answer' not in synthesized_result:
            return await self._weighted_voting_synthesis(all_results)

        return {
            'success': synthesized_result['confidence'] >= self.min_confidence,
            'answer': synthesized_result['answer'],
            'confidence': synthesized_result['confidence']
        }

    def _generate_meta_insights(self, strategy_results: Dict[StrategyType, StrategyResult],
                                synthesis_method: str) -> List[str]:
        """Generate meta-insights about the synthesis process and strategy performance."""
        insights = []

        # Analyze strategy agreement
        agreement_rate = self._calculate_strategy_agreement(strategy_results)
        insights.append(f"Strategy agreement rate: {agreement_rate:.2f}")

        # Identify the strongest strategy (the weakest is computed but only the
        # strongest is reported, matching the original behavior).
        strategy_performances = [(st, res.confidence) for st, res in strategy_results.items()]
        if strategy_performances:
            best_strategy = max(strategy_performances, key=lambda x: x[1])
            insights.append(f"Most confident strategy: {best_strategy[0]} ({best_strategy[1]:.2f})")
        insights.append(f"Synthesis method used: {synthesis_method}")

        return insights

    def _calculate_synthesis_metrics(self, strategy_results: Dict[StrategyType, StrategyResult],
                                     final_result: Dict[str, Any]) -> Dict[str, Any]:
        """Calculate comprehensive metrics about the synthesis process."""
        confidences = [r.confidence for r in strategy_results.values()]
        return {
            'strategy_count': len(strategy_results),
            'average_confidence': float(np.mean(confidences)) if confidences else 0.0,
            'confidence_std': float(np.std(confidences)) if confidences else 0.0,
            'final_confidence': final_result['confidence'],
            'strategy_agreement': self._calculate_strategy_agreement(strategy_results)
        }

    def _update_performance(self, result: UnifiedResult):
        """Update performance metrics and strategy weights from one unified run."""
        # Update strategy performance
        for strategy_type, strategy_result in result.strategy_results.items():
            self.strategy_performance[strategy_type].append(strategy_result.confidence)

            # Update weights using exponential moving average
            current_weight = self.strategy_weights[strategy_type]
            performance = strategy_result.confidence
            self.strategy_weights[strategy_type] = (
                (1 - self.learning_rate) * current_weight +
                self.learning_rate * performance
            )

        # Update synthesis performance
        self.synthesis_performance[result.synthesis_method].append(result.confidence)

    def _calculate_resource_match(self, strategy_type: StrategyType, required_resources: Dict[str, Any]) -> float:
        """Calculate how well a strategy matches required resources."""
        # Implementation-specific resource matching logic (placeholder)
        return 0.8

    def _calculate_capability_match(self, strategy_type: StrategyType, required_capabilities: List[str]) -> float:
        """Calculate how well a strategy matches required capabilities."""
        # Implementation-specific capability matching logic (placeholder)
        return 0.8

    def _parse_task_analysis(self, response: str) -> Dict[str, Any]:
        """Parse the structured task-analysis text produced by _analyze_task."""
        analysis = {
            "type": "",
            "complexity": 0.0,
            "capabilities": [],
            "resources": {},
            "criteria": [],
            "risks": []
        }

        for line in response.split('\n'):
            line = line.strip()
            if line.startswith('Type:'):
                analysis["type"] = line[5:].strip()
            elif line.startswith('Complexity:'):
                try:
                    analysis["complexity"] = float(line[11:].strip())
                except ValueError:
                    pass
            elif line.startswith('Capabilities:'):
                analysis["capabilities"] = [c.strip() for c in line[13:].split(',')]
            elif line.startswith('Resources:'):
                try:
                    analysis["resources"] = json.loads(line[10:].strip())
                except json.JSONDecodeError:
                    analysis["resources"] = {"raw": line[10:].strip()}
            elif line.startswith('Criteria:'):
                analysis["criteria"] = [c.strip() for c in line[9:].split(',')]
            elif line.startswith('Risks:'):
                # FIX: 'Risks:' is 6 characters; the original sliced line[7:],
                # silently dropping the first character of the first risk.
                analysis["risks"] = [r.strip() for r in line[6:].split(',')]

        return analysis

    def _parse_synthesis(self, response: str) -> Dict[str, Any]:
        """Parse a structured synthesis-result text block."""
        synthesis = {
            "method": "",
            "answer": "",
            "confidence": 0.0,
            "insights": [],
            "performance": {}
        }

        for line in response.split('\n'):
            line = line.strip()
            if line.startswith('Method:'):
                synthesis["method"] = line[7:].strip()
            elif line.startswith('Answer:'):
                synthesis["answer"] = line[7:].strip()
            elif line.startswith('Confidence:'):
                try:
                    synthesis["confidence"] = float(line[11:].strip())
                except ValueError:
                    pass
            elif line.startswith('Insights:'):
                synthesis["insights"] = [i.strip() for i in line[9:].split(',')]
            elif line.startswith('Performance:'):
                try:
                    synthesis["performance"] = json.loads(line[12:].strip())
                except json.JSONDecodeError:
                    synthesis["performance"] = {"raw": line[12:].strip()}

        return synthesis

    def _strategy_result_to_dict(self, result: StrategyResult) -> Dict[str, Any]:
        """Convert strategy result to dictionary for serialization."""
        return {
            "strategy_type": result.strategy_type.value,
            "success": result.success,
            "answer": result.answer,
            "confidence": result.confidence,
            "reasoning_trace": result.reasoning_trace,
            "metadata": result.metadata,
            "performance_metrics": result.performance_metrics,
            "timestamp": result.timestamp.isoformat()
        }

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Get comprehensive performance metrics."""
        return {
            "strategy_weights": dict(self.strategy_weights),
            "average_performance": {
                strategy_type.value: sum(scores) / len(scores) if scores else 0
                for strategy_type, scores in self.strategy_performance.items()
            },
            "synthesis_success": {
                method: sum(scores) / len(scores) if scores else 0
                for method, scores in self.synthesis_performance.items()
            },
            "task_type_performance": {
                task_type: dict(strategy_scores)
                for task_type, strategy_scores in self.task_type_performance.items()
            }
        }

    def clear_performance_history(self):
        """Clear performance history and reset weights.

        NOTE: weights reset to a uniform 1.0, intentionally discarding the
        Groq-prioritized defaults set in __init__.
        """
        self.strategy_performance.clear()
        self.task_type_performance.clear()
        self.synthesis_performance.clear()
        self.strategy_weights = {
            strategy_type: 1.0 for strategy_type in StrategyType
        }

    def _extract_task_structure(self, agentic_results: Dict[StrategyType, StrategyResult]) -> Dict[str, Any]:
        """Extract task structure from agentic strategy results (placeholder)."""
        return {}

    def _create_execution_plan(self, task_structure: Dict[str, Any], all_results: Dict[StrategyType, StrategyResult]) -> Dict[str, Any]:
        """Create execution plan based on task structure and strategy results (placeholder)."""
        return {}

    def _execute_synthesis_plan(self, execution_plan: Dict[str, Any], all_results: Dict[StrategyType, StrategyResult]) -> Dict[str, Any]:
        """Execute synthesis plan and combine results (placeholder)."""
        return {}

    def _calculate_strategy_agreement(self, strategy_results: Dict[StrategyType, StrategyResult]) -> float:
        """Calculate agreement rate among strategies.

        FIX: previously a placeholder that always returned 0.0. Now returns
        the fraction of answered strategies that share the modal answer
        (0.0 when no strategy produced an answer).
        """
        answers = [r.answer for r in strategy_results.values() if r.answer is not None]
        if not answers:
            return 0.0
        top_count = max(answers.count(a) for a in set(answers))
        return top_count / len(answers)
|
reasoning/venture_strategies.py
ADDED
@@ -0,0 +1,701 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Specialized strategies for autonomous business and revenue generation."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
class VentureType(Enum):
    """Types of business ventures the venture strategies can reason about."""
    AI_STARTUP = "ai_startup"
    SAAS = "saas"
    API_SERVICE = "api_service"
    DATA_ANALYTICS = "data_analytics"
    AUTOMATION_SERVICE = "automation_service"
    CONSULTING = "consulting"
    DIGITAL_PRODUCTS = "digital_products"
    MARKETPLACE = "marketplace"
|
24 |
+
|
25 |
+
class RevenueStream(Enum):
    """Types of revenue streams a venture can monetize through."""
    SUBSCRIPTION = "subscription"
    USAGE_BASED = "usage_based"
    LICENSING = "licensing"
    CONSULTING = "consulting"
    PRODUCT_SALES = "product_sales"
    COMMISSION = "commission"
    ADVERTISING = "advertising"
    PARTNERSHIP = "partnership"
|
35 |
+
|
36 |
+
@dataclass
class VentureMetrics:
    """Key business metrics for a single venture.

    Monetary fields are plain floats; units/currency are not fixed by this
    module — presumably USD, TODO confirm against callers.
    """
    revenue: float                    # total revenue
    profit_margin: float              # profit as a fraction of revenue
    customer_acquisition_cost: float  # cost to acquire one customer (CAC)
    lifetime_value: float             # expected revenue per customer (LTV)
    churn_rate: float                 # fraction of customers lost per period
    growth_rate: float                # period-over-period growth
    burn_rate: float                  # cash spent per period
    runway_months: float              # months of runway at current burn
    roi: float                        # return on investment
|
48 |
+
|
49 |
+
@dataclass
class MarketOpportunity:
    """Market opportunity analysis for a venture.

    Risk/level fields are scalar scores; the scale (e.g. 0-1 vs 0-10) is not
    defined in this module — NOTE(review): confirm the convention producers
    use, since get_venture_metrics() sums regulatory + technology risks.
    """
    market_size: float            # total addressable market size
    growth_potential: float       # expected market growth
    competition_level: float      # intensity of competition
    entry_barriers: float         # difficulty of market entry
    regulatory_risks: float       # regulatory/compliance risk score
    technology_risks: float       # technical feasibility risk score
    monetization_potential: float # ease of converting demand to revenue
|
59 |
+
|
60 |
+
class AIStartupStrategy(ReasoningStrategy):
    """
    Advanced AI startup strategy that:
    1. Identifies profitable AI applications
    2. Analyzes market opportunities
    3. Develops MVP strategies
    4. Plans scaling approaches
    5. Optimizes revenue streams
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict; missing keys use defaults."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters (shared convention across all
        # venture strategies in this module)
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate AI startup strategy.

        Pipeline: market analysis -> technology assessment -> business model
        -> growth strategy -> financial projections. "success" is True when
        projected annual profit reaches $1M.

        NOTE(review): the helper coroutines used below (_analyze_market,
        _assess_technology, _develop_business_model, _create_growth_strategy,
        _project_financials) and _calculate_confidence are not defined in
        this class or visible in this file; unless ReasoningStrategy supplies
        them, this method raises AttributeError and falls into the error path.
        """
        try:
            # Market analysis
            market = await self._analyze_market(query, context)

            # Technology assessment
            tech = await self._assess_technology(market, context)

            # Business model
            model = await self._develop_business_model(tech, context)

            # Growth strategy
            strategy = await self._create_growth_strategy(model, context)

            # Financial projections
            projections = await self._project_financials(strategy, context)

            return {
                # Success bar: >= $1M projected annual profit
                "success": projections["annual_profit"] >= 1_000_000,
                "market_analysis": market,
                "tech_assessment": tech,
                "business_model": model,
                "growth_strategy": strategy,
                "financials": projections,
                "confidence": self._calculate_confidence(projections)
            }
        except Exception as e:
            logging.error(f"Error in AI startup strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
115 |
+
|
116 |
+
class SaaSVentureStrategy(ReasoningStrategy):
    """
    Advanced SaaS venture strategy that:
    1. Identifies scalable SaaS opportunities
    2. Develops pricing strategies
    3. Plans customer acquisition
    4. Optimizes retention
    5. Maximizes recurring revenue
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict; missing keys use defaults."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate SaaS venture strategy.

        Pipeline: opportunity -> product -> pricing -> growth -> revenue
        projections; "success" means >= $1M projected annual revenue.

        NOTE(review): the helper coroutines below are not defined in this
        class or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.
        """
        try:
            # Opportunity analysis
            opportunity = await self._analyze_opportunity(query, context)

            # Product strategy
            product = await self._develop_product_strategy(opportunity, context)

            # Pricing model
            pricing = await self._create_pricing_model(product, context)

            # Growth plan
            growth = await self._plan_growth(pricing, context)

            # Revenue projections
            projections = await self._project_revenue(growth, context)

            return {
                "success": projections["annual_revenue"] >= 1_000_000,
                "opportunity": opportunity,
                "product": product,
                "pricing": pricing,
                "growth": growth,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in SaaS venture strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
170 |
+
|
171 |
+
class AutomationVentureStrategy(ReasoningStrategy):
    """
    Advanced automation venture strategy that:
    1. Identifies automation opportunities
    2. Analyzes cost-saving potential
    3. Develops automation solutions
    4. Plans implementation
    5. Maximizes ROI
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict; missing keys use defaults."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate automation venture strategy.

        Pipeline: opportunities -> solutions -> implementation -> ROI ->
        scale; "success" means >= $1M projected annual profit (from the ROI
        step, not the scale step).

        NOTE(review): helper coroutines below are not defined in this class
        or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.
        """
        try:
            # Opportunity identification
            opportunities = await self._identify_opportunities(query, context)

            # Solution development
            solutions = await self._develop_solutions(opportunities, context)

            # Implementation strategy
            implementation = await self._create_implementation_strategy(solutions, context)

            # ROI analysis
            roi = await self._analyze_roi(implementation, context)

            # Scale strategy
            scale = await self._create_scale_strategy(roi, context)

            return {
                "success": roi["annual_profit"] >= 1_000_000,
                "opportunities": opportunities,
                "solutions": solutions,
                "implementation": implementation,
                "roi": roi,
                "scale": scale
            }
        except Exception as e:
            logging.error(f"Error in automation venture strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
225 |
+
|
226 |
+
class DataVentureStrategy(ReasoningStrategy):
    """
    Advanced data venture strategy that:
    1. Identifies valuable data opportunities
    2. Develops data products
    3. Creates monetization strategies
    4. Ensures compliance
    5. Maximizes data value
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict; missing keys use defaults."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate data venture strategy.

        Pipeline: data opportunity -> product -> monetization -> compliance
        -> scale; "success" is judged on the monetization step's
        annual_revenue (>= $1M).

        NOTE(review): helper coroutines below are not defined in this class
        or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.
        """
        try:
            # Data opportunity analysis
            opportunity = await self._analyze_data_opportunity(query, context)

            # Product development
            product = await self._develop_data_product(opportunity, context)

            # Monetization strategy
            monetization = await self._create_monetization_strategy(product, context)

            # Compliance plan
            compliance = await self._ensure_compliance(monetization, context)

            # Scale plan
            scale = await self._plan_scaling(compliance, context)

            return {
                "success": monetization["annual_revenue"] >= 1_000_000,
                "opportunity": opportunity,
                "product": product,
                "monetization": monetization,
                "compliance": compliance,
                "scale": scale
            }
        except Exception as e:
            logging.error(f"Error in data venture strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
280 |
+
|
281 |
+
class APIVentureStrategy(ReasoningStrategy):
    """
    Advanced API venture strategy that:
    1. Identifies API opportunities
    2. Develops API products
    3. Creates pricing models
    4. Plans scaling
    5. Maximizes API value
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict; missing keys use defaults."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate API venture strategy.

        Pipeline: opportunity -> product -> pricing -> scale -> revenue
        projections; "success" means >= $1M projected annual revenue.

        NOTE(review): helper coroutines below are not defined in this class
        or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.
        """
        try:
            # API opportunity analysis
            opportunity = await self._analyze_api_opportunity(query, context)

            # Product development
            product = await self._develop_api_product(opportunity, context)

            # Pricing strategy
            pricing = await self._create_api_pricing(product, context)

            # Scale strategy
            scale = await self._plan_api_scaling(pricing, context)

            # Revenue projections
            projections = await self._project_api_revenue(scale, context)

            return {
                "success": projections["annual_revenue"] >= 1_000_000,
                "opportunity": opportunity,
                "product": product,
                "pricing": pricing,
                "scale": scale,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in API venture strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
335 |
+
|
336 |
+
class MarketplaceVentureStrategy(ReasoningStrategy):
    """
    Advanced marketplace venture strategy that:
    1. Identifies marketplace opportunities
    2. Develops platform strategy
    3. Plans liquidity generation
    4. Optimizes matching
    5. Maximizes transaction value
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict; missing keys use defaults."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate marketplace venture strategy.

        Pipeline: opportunity -> platform -> liquidity -> growth -> revenue
        projections; "success" means >= $1M projected annual revenue.

        NOTE(review): helper coroutines below are not defined in this class
        or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.
        """
        try:
            # Opportunity analysis
            opportunity = await self._analyze_marketplace_opportunity(query, context)

            # Platform strategy
            platform = await self._develop_platform_strategy(opportunity, context)

            # Liquidity strategy
            liquidity = await self._create_liquidity_strategy(platform, context)

            # Growth strategy
            growth = await self._plan_marketplace_growth(liquidity, context)

            # Revenue projections
            projections = await self._project_marketplace_revenue(growth, context)

            return {
                "success": projections["annual_revenue"] >= 1_000_000,
                "opportunity": opportunity,
                "platform": platform,
                "liquidity": liquidity,
                "growth": growth,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in marketplace venture strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
390 |
+
|
391 |
+
class VenturePortfolioStrategy(ReasoningStrategy):
    """
    Advanced venture portfolio strategy that:
    1. Optimizes venture mix
    2. Balances risk-reward
    3. Allocates resources
    4. Manages dependencies
    5. Maximizes portfolio value
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from an optional config dict; missing keys use defaults."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Tracked ventures consumed by get_venture_metrics().  Previously
        # that method referenced self.ventures without it ever being
        # initialized anywhere, so it always raised AttributeError.
        self.ventures: List[Any] = []

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate venture portfolio strategy.

        Pipeline: portfolio analysis -> venture selection -> resource
        allocation -> risk management -> projections; "success" means
        >= $1M projected annual profit.

        NOTE(review): _select_ventures, _allocate_resources, _manage_risk and
        _project_portfolio are not defined in this class or file; unless
        supplied by ReasoningStrategy, this raises AttributeError and
        returns the error dict.
        """
        try:
            # Portfolio analysis
            analysis = await self._analyze_portfolio(query, context)

            # Venture selection
            selection = await self._select_ventures(analysis, context)

            # Resource allocation
            allocation = await self._allocate_resources(selection, context)

            # Risk management
            risk = await self._manage_risk(allocation, context)

            # Portfolio projections
            projections = await self._project_portfolio(risk, context)

            return {
                "success": projections["annual_profit"] >= 1_000_000,
                "analysis": analysis,
                "selection": selection,
                "allocation": allocation,
                "risk": risk,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in venture portfolio strategy: {str(e)}")
            return {"success": False, "error": str(e)}

    async def _analyze_portfolio(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze potential venture portfolio.

        Sends a structured prompt to the LLM reachable via
        context["groq_api"] (a KeyError here is caught by reason()'s
        error path) and parses the free-text answer into sections.
        """
        prompt = f"""
        Analyze venture portfolio opportunities:
        Query: {query}
        Context: {json.dumps(context)}

        Consider:
        1. Market opportunities
        2. Technology trends
        3. Resource requirements
        4. Risk factors
        5. Synergy potential

        Format as:
        [Analysis]
        Opportunities: ...
        Trends: ...
        Resources: ...
        Risks: ...
        Synergies: ...
        """

        response = await context["groq_api"].predict(prompt)
        return self._parse_portfolio_analysis(response["answer"])

    def _parse_portfolio_analysis(self, response: str) -> Dict[str, Any]:
        """Parse portfolio analysis from the LLM's sectioned free-text reply.

        Lines under a "Resources:" header are treated as "key: value" pairs;
        all other sections accumulate lines into lists.  Malformed resource
        lines (no colon) are skipped.
        """
        analysis = {
            "opportunities": [],
            "trends": [],
            "resources": {},
            "risks": [],
            "synergies": []
        }

        current_section = None
        for line in response.split('\n'):
            line = line.strip()
            if line.startswith('Opportunities:'):
                current_section = "opportunities"
            elif line.startswith('Trends:'):
                current_section = "trends"
            elif line.startswith('Resources:'):
                current_section = "resources"
            elif line.startswith('Risks:'):
                current_section = "risks"
            elif line.startswith('Synergies:'):
                current_section = "synergies"
            elif current_section and line:
                if current_section == "resources":
                    # split(':', 1) keeps colons inside the value intact
                    # (plain split(':') raised ValueError on e.g. URLs);
                    # only a line with no colon at all is skipped.
                    try:
                        key, value = line.split(':', 1)
                    except ValueError:
                        continue
                    analysis[current_section][key.strip()] = value.strip()
                else:
                    analysis[current_section].append(line)

        return analysis

    def get_venture_metrics(self) -> Dict[str, Any]:
        """Get comprehensive venture metrics across the tracked portfolio.

        Returns an all-zero report when no ventures are tracked, instead of
        raising or producing NaN (np.mean of an empty sequence).
        """
        ventures = getattr(self, 'ventures', [])
        if not ventures:
            return {
                "portfolio_metrics": {
                    "total_ventures": 0,
                    "profitable_ventures": 0,
                    "total_revenue": 0.0,
                    "average_margin": 0.0,
                    "portfolio_roi": 0.0
                },
                "market_metrics": {
                    "total_market_size": 0.0,
                    "average_growth": 0.0,
                    "risk_score": 0.0
                },
                "performance_metrics": {
                    "customer_acquisition": 0.0,
                    "lifetime_value": 0.0,
                    "churn_rate": 0.0,
                    "burn_rate": 0.0
                }
            }

        return {
            "portfolio_metrics": {
                "total_ventures": len(ventures),
                "profitable_ventures": sum(1 for v in ventures if v.metrics.profit_margin > 0),
                "total_revenue": sum(v.metrics.revenue for v in ventures),
                "average_margin": np.mean([v.metrics.profit_margin for v in ventures]),
                "portfolio_roi": np.mean([v.metrics.roi for v in ventures])
            },
            "market_metrics": {
                "total_market_size": sum(v.opportunity.market_size for v in ventures),
                "average_growth": np.mean([v.opportunity.growth_potential for v in ventures]),
                "risk_score": np.mean([v.opportunity.regulatory_risks + v.opportunity.technology_risks for v in ventures])
            },
            "performance_metrics": {
                "customer_acquisition": np.mean([v.metrics.customer_acquisition_cost for v in ventures]),
                "lifetime_value": np.mean([v.metrics.lifetime_value for v in ventures]),
                "churn_rate": np.mean([v.metrics.churn_rate for v in ventures]),
                "burn_rate": sum(v.metrics.burn_rate for v in ventures)
            }
        }
|
529 |
+
|
530 |
+
class VentureStrategy(ReasoningStrategy):
    """
    Advanced venture strategy that combines multiple specialized strategies
    to generate comprehensive business plans and recommendations.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize venture strategy with component strategies."""
        super().__init__()
        self.config = config or {}

        # Standard reasoning parameters
        self.min_confidence = self.config.get('min_confidence', 0.7)
        self.parallel_threshold = self.config.get('parallel_threshold', 3)
        self.learning_rate = self.config.get('learning_rate', 0.1)
        self.strategy_weights = self.config.get('strategy_weights', {
            "LOCAL_LLM": 0.8,
            "CHAIN_OF_THOUGHT": 0.6,
            "TREE_OF_THOUGHTS": 0.5,
            "META_LEARNING": 0.4
        })

        # Initialize component strategies with shared config
        strategy_config = {
            'min_confidence': self.min_confidence,
            'parallel_threshold': self.parallel_threshold,
            'learning_rate': self.learning_rate,
            'strategy_weights': self.strategy_weights
        }

        self.strategies = {
            VentureType.AI_STARTUP: AIStartupStrategy(strategy_config),
            VentureType.SAAS: SaaSVentureStrategy(strategy_config),
            VentureType.AUTOMATION_SERVICE: AutomationVentureStrategy(strategy_config),
            VentureType.DATA_ANALYTICS: DataVentureStrategy(strategy_config),
            VentureType.API_SERVICE: APIVentureStrategy(strategy_config),
            VentureType.MARKETPLACE: MarketplaceVentureStrategy(strategy_config)
        }

        # Portfolio strategy for multi-venture optimization
        self.portfolio_strategy = VenturePortfolioStrategy(strategy_config)

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate venture strategy based on query and context.

        Args:
            query: The venture strategy query
            context: Additional context and parameters

        Returns:
            Dict containing venture strategy and confidence scores
        """
        try:
            # Determine venture type from query/context
            venture_type = self._determine_venture_type(query, context)

            # Get strategy for venture type
            strategy = self.strategies.get(venture_type)
            if not strategy:
                raise ValueError(f"Unsupported venture type: {venture_type}")

            # Generate strategy
            strategy_result = await strategy.reason(query, context)

            # Get portfolio analysis
            portfolio_result = await self.portfolio_strategy.reason(query, context)

            # Combine results
            combined_result = self._combine_results(
                strategy_result,
                portfolio_result,
                venture_type
            )

            return {
                'answer': self._format_strategy(combined_result),
                'confidence': combined_result.get('confidence', 0.0),
                'venture_type': venture_type.value,
                'strategy': strategy_result,
                'portfolio_analysis': portfolio_result
            }

        except Exception as e:
            logging.error(f"Venture strategy generation failed: {str(e)}")
            return {
                'error': f"Venture strategy generation failed: {str(e)}",
                'confidence': 0.0
            }

    def _determine_venture_type(self, query: str, context: Dict[str, Any]) -> VentureType:
        """Determine venture type from query and context.

        An explicit context['venture_type'] wins.  Otherwise the query is
        matched keyword-by-keyword: whole word tokens rather than raw
        substrings, so 'ai' no longer fires inside "maintain"/"email" and
        'ml' no longer fires inside "html".  Categories are checked in the
        same priority order as before; AI startup is the fallback.
        """
        # Use context if available
        if 'venture_type' in context:
            return VentureType(context['venture_type'])

        # Tokenize, stripping common punctuation from each word
        tokens = {word.strip('.,;:!?()[]{}"\'') for word in query.lower().split()}

        keyword_map = [
            (VentureType.AI_STARTUP, {'ai', 'ml', 'model', 'neural'}),
            (VentureType.SAAS, {'saas', 'software', 'cloud'}),
            (VentureType.AUTOMATION_SERVICE, {'automate', 'automation', 'workflow'}),
            (VentureType.DATA_ANALYTICS, {'data', 'analytics', 'insights'}),
            (VentureType.API_SERVICE, {'api', 'service', 'endpoint'}),
            (VentureType.MARKETPLACE, {'marketplace', 'platform', 'network'}),
        ]
        for venture_type, keywords in keyword_map:
            if tokens & keywords:
                return venture_type

        # Default to AI startup if unclear
        return VentureType.AI_STARTUP

    def _combine_results(
        self,
        strategy_result: Dict[str, Any],
        portfolio_result: Dict[str, Any],
        venture_type: VentureType
    ) -> Dict[str, Any]:
        """Combine strategy and portfolio results.

        Overall confidence is the pessimistic min() of both sub-results.
        """
        return {
            'venture_type': venture_type.value,
            'strategy': strategy_result.get('strategy', {}),
            'metrics': strategy_result.get('metrics', {}),
            'portfolio_fit': portfolio_result.get('portfolio_fit', {}),
            'recommendations': strategy_result.get('recommendations', []),
            'confidence': min(
                strategy_result.get('confidence', 0.0),
                portfolio_result.get('confidence', 0.0)
            )
        }

    def _format_strategy(self, result: Dict[str, Any]) -> str:
        """Format venture strategy into readable text sections."""
        sections = []

        # Venture type
        sections.append(f"Venture Type: {result['venture_type'].replace('_', ' ').title()}")

        # Strategy overview
        if 'strategy' in result:
            strategy = result['strategy']
            sections.append("\nStrategy Overview:")
            for key, value in strategy.items():
                sections.append(f"- {key.replace('_', ' ').title()}: {value}")

        # Key metrics (numbers rendered with two decimals)
        if 'metrics' in result:
            metrics = result['metrics']
            sections.append("\nKey Metrics:")
            for key, value in metrics.items():
                if isinstance(value, (int, float)):
                    sections.append(f"- {key.replace('_', ' ').title()}: {value:.2f}")
                else:
                    sections.append(f"- {key.replace('_', ' ').title()}: {value}")

        # Portfolio fit
        if 'portfolio_fit' in result:
            fit = result['portfolio_fit']
            sections.append("\nPortfolio Analysis:")
            for key, value in fit.items():
                sections.append(f"- {key.replace('_', ' ').title()}: {value}")

        # Recommendations
        if 'recommendations' in result:
            recs = result['recommendations']
            sections.append("\nKey Recommendations:")
            for rec in recs:
                sections.append(f"- {rec}")

        return "\n".join(sections)
|
reasoning/venture_types.py
ADDED
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Additional venture types for business optimization."""
|
2 |
+
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, List, Optional, Set, Union, Type, Tuple
|
5 |
+
import json
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from enum import Enum
|
8 |
+
from datetime import datetime
|
9 |
+
import numpy as np
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
from .base import ReasoningStrategy
|
13 |
+
|
14 |
+
class AIInfrastructureStrategy(ReasoningStrategy):
    """
    AI infrastructure venture strategy that:
    1. Identifies infrastructure needs
    2. Develops cloud solutions
    3. Optimizes compute resources
    4. Manages scalability
    5. Ensures reliability
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate AI infrastructure strategy.

        Pipeline: market -> design -> optimization -> scaling -> revenue
        projections; "success" means >= $1M projected annual revenue.

        NOTE(review): the helper coroutines below are not defined in this
        class or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.  This class also has no
        __init__, unlike its siblings in venture_strategies.py.
        """
        try:
            # Market analysis
            market = await self._analyze_market(query, context)

            # Infrastructure design
            design = await self._design_infrastructure(market, context)

            # Optimization strategy
            optimization = await self._create_optimization_strategy(design, context)

            # Scaling plan
            scaling = await self._plan_scaling(optimization, context)

            # Revenue projections
            projections = await self._project_revenue(scaling, context)

            return {
                "success": projections["annual_revenue"] >= 1_000_000,
                "market": market,
                "design": design,
                "optimization": optimization,
                "scaling": scaling,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in AI infrastructure strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
53 |
+
|
54 |
+
class AIConsultingStrategy(ReasoningStrategy):
    """
    AI consulting venture strategy that:
    1. Identifies consulting opportunities
    2. Develops service offerings
    3. Creates delivery frameworks
    4. Manages client relationships
    5. Scales operations
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate AI consulting strategy.

        Pipeline: market -> services -> delivery framework -> growth ->
        revenue projections; "success" means >= $1M projected annual revenue.

        NOTE(review): helper coroutines below are not defined in this class
        or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.
        """
        try:
            # Market analysis
            market = await self._analyze_consulting_market(query, context)

            # Service design
            services = await self._design_services(market, context)

            # Delivery framework
            framework = await self._create_delivery_framework(services, context)

            # Growth strategy
            growth = await self._plan_growth(framework, context)

            # Revenue projections
            projections = await self._project_consulting_revenue(growth, context)

            return {
                "success": projections["annual_revenue"] >= 1_000_000,
                "market": market,
                "services": services,
                "framework": framework,
                "growth": growth,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in AI consulting strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
93 |
+
|
94 |
+
class AIProductStrategy(ReasoningStrategy):
    """
    AI product venture strategy that:
    1. Identifies product opportunities
    2. Develops product roadmap
    3. Creates go-to-market strategy
    4. Manages product lifecycle
    5. Scales distribution
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate AI product strategy.

        Pipeline: market -> product -> go-to-market -> scale -> revenue
        projections; "success" means >= $1M projected annual revenue.

        NOTE(review): helper coroutines below are not defined in this class
        or file; unless supplied by ReasoningStrategy, this raises
        AttributeError and returns the error dict.
        """
        try:
            # Market analysis
            market = await self._analyze_product_market(query, context)

            # Product development
            product = await self._develop_product_strategy(market, context)

            # Go-to-market
            gtm = await self._create_gtm_strategy(product, context)

            # Scale strategy
            scale = await self._plan_product_scaling(gtm, context)

            # Revenue projections
            projections = await self._project_product_revenue(scale, context)

            return {
                "success": projections["annual_revenue"] >= 1_000_000,
                "market": market,
                "product": product,
                "gtm": gtm,
                "scale": scale,
                "projections": projections
            }
        except Exception as e:
            logging.error(f"Error in AI product strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
133 |
+
|
134 |
+
class FinTechStrategy(ReasoningStrategy):
    """
    Venture strategy for launching a FinTech business.

    Pipeline:
        market analysis -> product development -> compliance
        -> risk management -> scaling plan

    Success is reported when the scaling plan projects at least
    $1M in annual revenue.
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate FinTech strategy."""
        try:
            # Sequential pipeline: every stage builds on the last one.
            market_view = await self._analyze_fintech_market(query, context)
            product_plan = await self._develop_fintech_product(market_view, context)
            compliance_plan = await self._ensure_compliance(product_plan, context)
            risk_plan = await self._manage_risk(compliance_plan, context)
            scaling_plan = await self._plan_fintech_scaling(risk_plan, context)

            report: Dict[str, Any] = {
                "success": scaling_plan["annual_revenue"] >= 1_000_000,
                "market": market_view,
                "product": product_plan,
                "compliance": compliance_plan,
                "risk": risk_plan,
                "scale": scaling_plan,
            }
            return report
        except Exception as e:
            logging.error(f"Error in FinTech strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
173 |
+
|
174 |
+
class HealthTechStrategy(ReasoningStrategy):
    """
    Venture strategy for launching a HealthTech business.

    Pipeline:
        market analysis -> solution development -> compliance
        -> data management -> scaling plan

    Success is reported when the scaling plan projects at least
    $1M in annual revenue.
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate HealthTech strategy."""
        try:
            # Run the stages in order; each feeds the next.
            market_view = await self._analyze_healthtech_market(query, context)
            solution_plan = await self._develop_health_solution(market_view, context)
            compliance_plan = await self._ensure_health_compliance(solution_plan, context)
            data_plan = await self._manage_health_data(compliance_plan, context)
            scaling_plan = await self._plan_healthtech_scaling(data_plan, context)

            report: Dict[str, Any] = {
                "success": scaling_plan["annual_revenue"] >= 1_000_000,
                "market": market_view,
                "solution": solution_plan,
                "compliance": compliance_plan,
                "data": data_plan,
                "scale": scaling_plan,
            }
            return report
        except Exception as e:
            logging.error(f"Error in HealthTech strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
213 |
+
|
214 |
+
class EdTechStrategy(ReasoningStrategy):
    """
    Venture strategy for launching an EdTech platform.

    Pipeline:
        market analysis -> learning solution -> content strategy
        -> engagement management -> scaling plan

    Success is reported when the scaling plan projects at least
    $1M in annual revenue.
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate EdTech strategy."""
        try:
            # Stages run sequentially; each output seeds the next stage.
            market_view = await self._analyze_edtech_market(query, context)
            solution_plan = await self._develop_learning_solution(market_view, context)
            content_plan = await self._create_content_strategy(solution_plan, context)
            engagement_plan = await self._manage_engagement(content_plan, context)
            scaling_plan = await self._plan_edtech_scaling(engagement_plan, context)

            report: Dict[str, Any] = {
                "success": scaling_plan["annual_revenue"] >= 1_000_000,
                "market": market_view,
                "solution": solution_plan,
                "content": content_plan,
                "engagement": engagement_plan,
                "scale": scaling_plan,
            }
            return report
        except Exception as e:
            logging.error(f"Error in EdTech strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
253 |
+
|
254 |
+
class BlockchainStrategy(ReasoningStrategy):
    """
    Venture strategy for launching a blockchain business.

    Pipeline:
        market analysis -> solution development -> security
        -> tokenomics -> scaling plan

    Success is reported when the scaling plan projects at least
    $1M in annual revenue.
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate blockchain strategy."""
        try:
            # Pipeline stages, each building on the previous result.
            market_view = await self._analyze_blockchain_market(query, context)
            solution_plan = await self._develop_blockchain_solution(market_view, context)
            security_plan = await self._ensure_blockchain_security(solution_plan, context)
            token_model = await self._design_tokenomics(security_plan, context)
            scaling_plan = await self._plan_blockchain_scaling(token_model, context)

            report: Dict[str, Any] = {
                "success": scaling_plan["annual_revenue"] >= 1_000_000,
                "market": market_view,
                "solution": solution_plan,
                "security": security_plan,
                "tokenomics": token_model,
                "scale": scaling_plan,
            }
            return report
        except Exception as e:
            logging.error(f"Error in blockchain strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
293 |
+
|
294 |
+
class AIMarketplaceStrategy(ReasoningStrategy):
    """
    Venture strategy for operating an AI model marketplace.

    Pipeline:
        market analysis -> platform development -> model quality
        -> transaction system -> scaling plan

    Success is reported when the scaling plan projects at least
    $1M in annual revenue.
    """

    async def reason(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate AI marketplace strategy."""
        try:
            # Five sequential stages; each consumes the prior output.
            market_view = await self._analyze_ai_marketplace(query, context)
            platform_plan = await self._develop_marketplace_platform(market_view, context)
            quality_plan = await self._ensure_model_quality(platform_plan, context)
            txn_system = await self._design_transaction_system(quality_plan, context)
            scaling_plan = await self._plan_marketplace_scaling(txn_system, context)

            report: Dict[str, Any] = {
                "success": scaling_plan["annual_revenue"] >= 1_000_000,
                "market": market_view,
                "platform": platform_plan,
                "quality": quality_plan,
                "transactions": txn_system,
                "scale": scaling_plan,
            }
            return report
        except Exception as e:
            logging.error(f"Error in AI marketplace strategy: {str(e)}")
            return {"success": False, "error": str(e)}
|
requirements.txt
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Core dependencies
|
2 |
+
fastapi>=0.68.0
|
3 |
+
uvicorn>=0.15.0
|
4 |
+
gradio==4.44.1
|
5 |
+
pydantic>=2.0.0
|
6 |
+
python-dotenv>=0.19.0
|
7 |
+
|
8 |
+
# API and networking
|
9 |
+
httpx>=0.24.0
|
10 |
+
requests>=2.31.0
|
11 |
+
aiohttp>=3.8.0
|
12 |
+
urllib3>=2.0.7
|
13 |
+
websockets>=10.0
|
14 |
+
|
15 |
+
# ML and data processing
|
16 |
+
numpy>=1.24.0
|
17 |
+
pandas>=2.1.0
|
18 |
+
scikit-learn>=1.3.2
|
19 |
+
plotly>=5.18.0
|
20 |
+
|
21 |
+
# Model integration
|
22 |
+
huggingface-hub>=0.19.4
|
23 |
+
groq>=0.4.1
|
24 |
+
|
25 |
+
# Utilities
|
26 |
+
typing-extensions>=4.0.0
|
27 |
+
# NOTE: asyncio is part of the Python standard library; the PyPI "asyncio"
# package is an obsolete backport and must not be installed.
|
28 |
+
tqdm>=4.66.0
|
29 |
+
joblib==1.3.2
|
30 |
+
|
31 |
+
# Development
|
32 |
+
pytest>=7.0.0
|
33 |
+
black>=22.0.0
|
34 |
+
isort>=5.0.0
|
35 |
+
mypy>=1.0.0
|