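"""Gradio demo: step-by-step chain-of-thought reasoning over the OpenRouter
chat-completions API. The model is asked to emit one JSON-structured reasoning
step per call; steps are streamed into a Chatbot as markdown.

Requires the OPENROUTER_API_KEY environment variable.
"""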
import gradio as gr
import requests
from pydantic import BaseModel
import time
import json
import os
from typing import Generator, List
class StepResponse(BaseModel):
    """One step of the model's reasoning, parsed from its JSON reply."""
    title: str
    content: str
    next_action: str   # one of: "continue", "reflect", "final_answer"
    confidence: float  # self-assessed quality score in [0.0, 1.0]
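
# Example of the JSON object the model is instructed to return (see the
# system prompt in generate_response below); the values here are illustrative:
#   {"title": "Examine assumptions", "content": "...",
#    "next_action": "continue", "confidence": 0.8}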
def get_available_models() -> List[str]:
"""Fetch available models from OpenRouter API"""
headers = {
"Authorization": f"Bearer {os.getenv('OPENROUTER_API_KEY')}",
}
try:
        response = requests.get("https://openrouter.ai/api/v1/models", headers=headers, timeout=10)
        response.raise_for_status()
        models = response.json()
        # response.json() returns a plain dict of the form {"data": [...]},
        # so index it; attribute access (models.data) would raise AttributeError.
        return [model["id"] for model in models["data"]]
except Exception as e:
print(f"Error fetching models: {e}")
        # Fall back to a small list of known OpenRouter model slugs. These ids
        # drift over time; check https://openrouter.ai/models for current ones.
        return [
            "anthropic/claude-3-sonnet",
            "anthropic/claude-3-opus",
            "google/gemini-pro",
            "meta-llama/llama-2-70b-chat",
            "mistralai/mistral-medium",
]
def make_api_call(model: str, system_prompt: str, messages: list, max_tokens: int,
is_final_answer: bool = False) -> StepResponse:
"""Make API call to OpenRouter with specified model"""
headers = {
"HTTP-Referer": "https://localhost:7860", # Gradio default
"X-Title": "Reasoning Chain Demo",
"Authorization": f"Bearer {os.getenv('OPENROUTER_API_KEY')}",
"Content-Type": "application/json"
}
url = "https://openrouter.ai/api/v1/chat/completions"
request_body = {
"model": model,
"messages": [
{"role": "system", "content": system_prompt},
*messages
],
"max_tokens": max_tokens,
"temperature": 0.2,
"response_format": {"type": "json_object"}
}
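
    # Note: not every model behind OpenRouter honors response_format, so the
    # json.loads below still guards against non-JSON replies.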
for attempt in range(3):
try:
            response = requests.post(url, headers=headers, json=request_body, timeout=60)
response.raise_for_status()
result = response.json()
message_content = result['choices'][0]['message']['content']
try:
response_data = json.loads(message_content)
return StepResponse(**response_data)
except json.JSONDecodeError as e:
raise ValueError(f"Failed to parse JSON response: {str(e)}")
except Exception as e:
if attempt == 2:
return StepResponse(
title="Error",
content=f"Failed to generate {'final answer' if is_final_answer else 'step'} after 3 attempts. Error: {str(e)}",
next_action="final_answer",
confidence=0.5
)
time.sleep(1)
def generate_response(prompt: str, model: str, progress=gr.Progress()) -> Generator[list, None, None]:
    """Generator that yields the chat history (openai-style message dicts) for
    gr.Chatbot, with each reasoning step rendered as markdown."""
system_prompt = """You are an AI assistant that explains your reasoning step by step, incorporating dynamic Chain of Thought (CoT), reflection, and verbal reinforcement learning. IMPORTANT: You must output exactly ONE step of reasoning at a time:
1. Each response must contain ONE single step of your reasoning process.
2. For each step, enclose your thoughts within <thinking> tags as you explore that specific step.
3. After completing your current step, indicate whether you need another step or are ready for the final answer.
4. Do not try to complete multiple steps or the entire analysis in one response.
5. Regularly evaluate your progress, being critical and honest about your reasoning process.
6. Assign a quality score between 0.0 and 1.0 to guide your approach:
- 0.8+: Continue current approach
- 0.5-0.7: Consider minor adjustments
- Below 0.5: Seriously consider backtracking and trying a different approach
IMPORTANT: Your response must be a valid JSON object with the following structure:
{
"title": "Step title or topic",
"content": "Detailed step content",
"next_action": "One of: continue, reflect, or final_answer",
"confidence": float between 0.0 and 1.0
}"""
messages = [{"role": "user", "content": prompt}]
step_count = 1
markdown_output = ""
while True:
progress(step_count / 15, f"Step {step_count}") # Show progress
step_data = make_api_call(model, system_prompt, messages, 750)
# Format step as markdown
step_md = f"### Step {step_count}: {step_data.title}\n\n"
step_md += f"{step_data.content}\n\n"
step_md += f"**Confidence:** {step_data.confidence:.2f}\n\n"
step_md += "---\n\n"
markdown_output += step_md
#yield markdown_output # Update the output incrementally
messages.append({"role": "assistant", "content": json.dumps(step_data.model_dump(), indent=2)})
if step_data.next_action == 'final_answer' and step_count < 15:
messages.append({"role": "user", "content": "Please continue your analysis with at least 5 more steps before providing the final answer."})
elif step_data.next_action == 'final_answer':
break
elif step_data.next_action == 'reflect' or step_count % 3 == 0:
messages.append({"role": "user", "content": "Please perform a detailed self-reflection on your reasoning so far."})
else:
messages.append({"role": "user", "content": "Please continue with the next step in your analysis."})
step_count += 1
yield messages
    # Generate and display the final answer.
    final_data = make_api_call(model, system_prompt, messages, 750, is_final_answer=True)
    final_md = f"### Final Answer\n\n{final_data.content}\n\n"
    final_md += f"**Confidence:** {final_data.confidence:.2f}\n"
    history.append({"role": "assistant", "content": final_md})
    yield history
def create_interface():
# Check for API key
if not os.getenv('OPENROUTER_API_KEY'):
raise ValueError("Please set OPENROUTER_API_KEY environment variable")
available_models = get_available_models()
with gr.Blocks() as interface:
gr.Markdown("# AI Reasoning Chain with Model Selection")
gr.Markdown("This demo shows chain-of-thought reasoning across different language models.")
with gr.Row():
with gr.Column():
model_dropdown = gr.Dropdown(
choices=available_models,
value=available_models[0],
label="Select Model"
)
                chatbot = gr.Chatbot(type="messages")  # expects openai-style {"role", "content"} dicts; needs a Gradio version that supports type="messages"
query_input = gr.Textbox(
lines=5,
label="Enter your query:",
placeholder="e.g., What are the potential long-term effects of climate change on global agriculture?"
)
submit_btn = gr.Button("Generate Response")
submit_btn.click(
fn=generate_response,
inputs=[query_input, model_dropdown],
outputs=chatbot
)
return interface
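
# Entry point: build the UI and serve it on Gradio's default
# http://localhost:7860.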
if __name__ == "__main__":
interface = create_interface()
interface.launch()