DeFactOfficial committed
Commit 456ca7d · verified · 1 Parent(s): 6888d23

Update app.py

Files changed (1): app.py (+176, -148)
app.py CHANGED
@@ -1,154 +1,182 @@
  import gradio as gr
- import numpy as np
- import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
- import torch
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
                  )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,  # Replace with defaults that work for your model
                  )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2,  # Replace with defaults that work for your model
                  )
-
-         gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
-     )

  if __name__ == "__main__":
-     demo.launch()
  import gradio as gr
+ import requests
+ from pydantic import BaseModel
+ import time
+ import json
+ import os
+ from typing import Generator, Tuple, List
+
+ class StepResponse(BaseModel):
+     title: str
+     content: str
+     next_action: str
+     confidence: float
+
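+ # Illustrative example (values are made up) of the JSON object each reasoning
+ # step is expected to parse into a StepResponse:
+ #   {"title": "Clarify the question", "content": "...",
+ #    "next_action": "continue", "confidence": 0.8}
+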
+ def get_available_models() -> List[str]:
+     """Fetch available models from OpenRouter API"""
+     headers = {
+         "Authorization": f"Bearer {os.getenv('OPENROUTER_API_KEY')}",
+     }
+
+     try:
+         response = requests.get("https://openrouter.ai/api/v1/models", headers=headers)
+         response.raise_for_status()
+         # The endpoint wraps the model list in a "data" key; iterating over the
+         # raw response dict would yield its keys, not model records.
+         models = response.json()["data"]
+         return [model["id"] for model in models]
+     except Exception as e:
+         print(f"Error fetching models: {e}")
+         # Fallback to a basic list of known models
+         return [
+             "anthropic/claude-3-sonnet-20240320",
+             "anthropic/claude-3-opus-20240229",
+             "google/gemini-pro",
+             "meta-llama/llama-2-70b-chat",
+             "mistral/mistral-medium",
+         ]
+
+ def make_api_call(model: str, system_prompt: str, messages: list, max_tokens: int,
+                   is_final_answer: bool = False) -> StepResponse:
+     """Make API call to OpenRouter with specified model"""
+     headers = {
+         "HTTP-Referer": "https://localhost:7860",  # Gradio default
+         "X-Title": "Reasoning Chain Demo",
+         "Authorization": f"Bearer {os.getenv('OPENROUTER_API_KEY')}",
+         "Content-Type": "application/json"
+     }
+
+     url = "https://openrouter.ai/api/v1/chat/completions"
+
+     request_body = {
+         "model": model,
+         "messages": [
+             {"role": "system", "content": system_prompt},
+             *messages
+         ],
+         "max_tokens": max_tokens,
+         "temperature": 0.2,
+         # Note: only a subset of OpenRouter-hosted models honor response_format;
+         # the json.loads() check below catches models that ignore it.
+         "response_format": {"type": "json_object"}
+     }
+
+     # Retry up to three times with a one-second pause between attempts; on the
+     # final failure, return a sentinel StepResponse instead of raising.
+     for attempt in range(3):
+         try:
+             response = requests.post(url, headers=headers, json=request_body)
+             response.raise_for_status()
+
+             result = response.json()
+             message_content = result['choices'][0]['message']['content']
+
+             try:
+                 response_data = json.loads(message_content)
+                 return StepResponse(**response_data)
+             except json.JSONDecodeError as e:
+                 raise ValueError(f"Failed to parse JSON response: {str(e)}")
+
+         except Exception as e:
+             if attempt == 2:
+                 return StepResponse(
+                     title="Error",
+                     content=f"Failed to generate {'final answer' if is_final_answer else 'step'} after 3 attempts. Error: {str(e)}",
+                     next_action="final_answer",
+                     confidence=0.5
                  )
+             time.sleep(1)
+
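+ # Hypothetical usage, e.g. from a REPL (model ID and prompt are placeholders):
+ #   step = make_api_call("google/gemini-pro", system_prompt,
+ #                        [{"role": "user", "content": "What is 2 + 2?"}], 200)
+ #   print(step.title, step.next_action, step.confidence)
+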
+ def generate_response(prompt: str, model: str, progress=gr.Progress()) -> Generator[str, None, None]:
+     """Generator function that yields formatted markdown for each step"""
+     system_prompt = """You are an AI assistant that explains your reasoning step by step, incorporating dynamic Chain of Thought (CoT), reflection, and verbal reinforcement learning. IMPORTANT: You must output exactly ONE step of reasoning at a time:
+
+ 1. Each response must contain ONE single step of your reasoning process.
+ 2. For each step, enclose your thoughts within <thinking> tags as you explore that specific step.
+ 3. After completing your current step, indicate whether you need another step or are ready for the final answer.
+ 4. Do not try to complete multiple steps or the entire analysis in one response.
+ 5. Regularly evaluate your progress, being critical and honest about your reasoning process.
+ 6. Assign a quality score between 0.0 and 1.0 to guide your approach:
+    - 0.8+: Continue current approach
+    - 0.5-0.7: Consider minor adjustments
+    - Below 0.5: Seriously consider backtracking and trying a different approach
+
+ IMPORTANT: Your response must be a valid JSON object with the following structure:
+ {
+     "title": "Step title or topic",
+     "content": "Detailed step content",
+     "next_action": "One of: continue, reflect, or final_answer",
+     "confidence": float between 0.0 and 1.0
+ }"""
+
+     messages = [{"role": "user", "content": prompt}]
+     step_count = 1
+     markdown_output = ""
+
+     while True:
+         progress(min(step_count / 15, 1.0), f"Step {step_count}")  # Show progress, capped at 100%
+         step_data = make_api_call(model, system_prompt, messages, 750)
+
+         # Format step as markdown
+         step_md = f"### Step {step_count}: {step_data.title}\n\n"
+         step_md += f"{step_data.content}\n\n"
+         step_md += f"**Confidence:** {step_data.confidence:.2f}\n\n"
+         step_md += "---\n\n"
+
+         markdown_output += step_md
+         yield markdown_output  # Update the output incrementally
+
+         messages.append({"role": "assistant", "content": json.dumps(step_data.model_dump())})
+
+         # Keep the chain going until at least 15 steps have been produced, even
+         # if the model asks to finish early.
+         if step_data.next_action == 'final_answer' and step_count < 15:
+             messages.append({"role": "user", "content": "Please continue your analysis with at least 5 more steps before providing the final answer."})
+         elif step_data.next_action == 'final_answer':
+             break
+         elif step_data.next_action == 'reflect' or step_count % 3 == 0:
+             messages.append({"role": "user", "content": "Please perform a detailed self-reflection on your reasoning so far."})
+         else:
+             messages.append({"role": "user", "content": "Please continue with the next step in your analysis."})
+
+         step_count += 1
+
+     # Generate final answer
+     final_data = make_api_call(model, system_prompt, messages, 750, is_final_answer=True)
+
+     final_md = "### Final Answer\n\n"
+     final_md += f"{final_data.content}\n\n"
+     final_md += f"**Confidence:** {final_data.confidence:.2f}\n\n"
+
+     markdown_output += final_md
+     yield markdown_output
+
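+ # Because generate_response is a generator, binding it to the click event below
+ # lets Gradio stream each yielded markdown snapshot into the output component.
+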
+ def create_interface():
+     # Check for API key
+     if not os.getenv('OPENROUTER_API_KEY'):
+         raise ValueError("Please set OPENROUTER_API_KEY environment variable")
+
+     available_models = get_available_models()
+
+     with gr.Blocks() as interface:
+         gr.Markdown("# AI Reasoning Chain with Model Selection")
+         gr.Markdown("This demo shows chain-of-thought reasoning across different language models.")
+
+         with gr.Row():
+             with gr.Column():
+                 model_dropdown = gr.Dropdown(
+                     choices=available_models,
+                     value=available_models[0],
+                     label="Select Model"
                  )
+                 query_input = gr.Textbox(
+                     label="Enter your query:",
+                     placeholder="e.g., What are the potential long-term effects of climate change on global agriculture?"
                  )
+                 submit_btn = gr.Button("Generate Response")
+
+         output_box = gr.Markdown(label="Response")
+
+         submit_btn.click(
+             fn=generate_response,
+             inputs=[query_input, model_dropdown],
+             outputs=output_box
+         )
+
+     return interface

  if __name__ == "__main__":
+     interface = create_interface()
+     interface.launch()
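
A minimal sketch of exercising the new entry points from Python (it assumes OPENROUTER_API_KEY is already set in the environment, as app.py requires, and that app.py is importable from the working directory):

    from app import get_available_models, create_interface

    print(get_available_models()[:5])   # a few OpenRouter model IDs, or the fallback list
    create_interface().launch()         # same as running `python app.py`; serves on localhost:7860 by default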