azettl commited on
Commit
5bef7be
ยท
0 Parent(s):

Initial commit

Browse files
Files changed (2) hide show
  1. app.py +1074 -0
  2. requirements.txt +10 -0
app.py ADDED
@@ -0,0 +1,1074 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import json
4
+ import os
5
+ import asyncio
6
+ from datetime import datetime
7
+ from typing import Dict, List, Any, Optional, Tuple
8
+ from dotenv import load_dotenv
9
+ import time
10
+ import re
11
+ from collections import Counter
12
+ import threading
13
+ import queue
14
+ from gradio_consilium_roundtable import consilium_roundtable
15
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, VisitWebpageTool, Tool
16
+
17
# Load environment variables from a local .env file, if present.
load_dotenv()

# API Configuration - These will be updated by UI if needed.
# NOTE: update_api_keys() rebinds these module-level globals at runtime,
# and VisualConsensusEngine reads them when it is constructed.
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY")
# Default moderator model key; "mistral" unless overridden by the environment.
MODERATOR_MODEL = os.getenv("MODERATOR_MODEL", "mistral")
24
+
25
class WikipediaTool(Tool):
    """smolagents Tool: look a topic up on Wikipedia and return a short summary."""

    name = "wikipedia_search"
    description = "Search Wikipedia for comprehensive information on any topic"
    inputs = {"query": {"type": "string", "description": "The topic to search for on Wikipedia"}}
    output_type = "string"

    def forward(self, query: str) -> str:
        try:
            # Imported lazily so the tool only needs the package when actually used.
            import wikipedia

            matches = wikipedia.search(query, results=3)
            if not matches:
                return f"No Wikipedia articles found for: {query}"

            # Take the top hit and cap the summary at 1000 characters.
            page = wikipedia.page(matches[0])
            text = page.summary
            if len(text) > 1000:
                text = text[:1000] + "..."

            return f"**Wikipedia: {page.title}**\n\n{text}\n\nSource: {page.url}"
        except Exception as e:
            # Any failure (missing package, disambiguation, network) becomes a readable string.
            return f"Wikipedia search error: {str(e)}"
46
+
47
class WebSearchAgent:
    """Wraps a smolagents CodeAgent configured for web research.

    The agent can search (DuckDuckGo / Wikipedia), visit pages, and must
    finish via FinalAnswerTool within ``max_steps`` steps.
    """

    def __init__(self):
        self.agent = CodeAgent(
            tools=[
                DuckDuckGoSearchTool(),
                VisitWebpageTool(),
                WikipediaTool(),
                FinalAnswerTool()
            ],
            model=InferenceClientModel(),
            max_steps=5,
            verbosity_level=1
        )

    def search(self, query: str, max_results: int = 5) -> str:
        """Use the CodeAgent to perform comprehensive web search and analysis.

        ``max_results`` is accepted for interface compatibility but is not
        forwarded to the agent in this implementation.
        """
        agent_prompt = f"""You are a web research agent. Please research the following query comprehensively:

"{query}"

Your task:
1. Search for relevant information using DuckDuckGo or Wikipedia
2. Visit the most promising web pages to get detailed information
3. Synthesize the findings into a comprehensive, well-formatted response
4. Include sources and links where appropriate
5. Format your response with markdown for better readability

Please provide a thorough analysis based on current, reliable information."""

        try:
            outcome = self.agent.run(agent_prompt)
        except Exception as e:
            # Fallback to simple error message
            return f"🔍 **Web Search Error for:** {query}\n\nError: {str(e)}\n\nThe search agent encountered an issue. Please try again or rephrase your query."

        if outcome:
            return f"🔍 **Web Research Results for:** {query}\n\n{outcome}"
        return f"🔍 **Web Search for:** {query}\n\nNo results found or agent encountered an error."
90
+
91
class VisualConsensusEngine:
    """Orchestrates a multi-model consensus discussion with live visual updates.

    Drives three phases (initial responses, discussion rounds, moderator
    synthesis) and pushes roundtable state to the UI via ``update_callback``.
    Depends on module-level state defined elsewhere in this file:
    MISTRAL_API_KEY / SAMBANOVA_API_KEY / MODERATOR_MODEL,
    ``log_discussion_event``, ``discussion_log`` and ``step_continue_event``.
    """

    def __init__(self, moderator_model: str = None, update_callback=None):
        # No-op declaration: the keys are only *read* here, never rebound.
        global MISTRAL_API_KEY, SAMBANOVA_API_KEY

        self.moderator_model = moderator_model or MODERATOR_MODEL
        self.search_agent = WebSearchAgent()
        self.update_callback = update_callback  # For real-time updates

        # Use global API keys (which may be updated from UI).
        # Availability is snapshotted at construction time, so keys changed
        # afterwards only take effect for newly created engines.
        self.models = {
            'mistral': {
                'name': 'Mistral Large',
                'api_key': MISTRAL_API_KEY,
                'available': bool(MISTRAL_API_KEY)
            },
            'sambanova_deepseek': {
                'name': 'DeepSeek-R1',
                'api_key': SAMBANOVA_API_KEY,
                'available': bool(SAMBANOVA_API_KEY)
            },
            'sambanova_llama': {
                'name': 'Meta-Llama-3.1-8B',
                'api_key': SAMBANOVA_API_KEY,
                'available': bool(SAMBANOVA_API_KEY)
            },
            'sambanova_qwq': {
                'name': 'QwQ-32B',
                'api_key': SAMBANOVA_API_KEY,
                'available': bool(SAMBANOVA_API_KEY)
            },
            'search': {
                'name': 'Web Search Agent',
                'api_key': True,
                'available': True
            }
        }

        # Role definitions: persona preambles prepended to each model's prompt.
        self.roles = {
            'standard': "You are participating in a collaborative AI discussion. Provide thoughtful, balanced analysis.",
            'devils_advocate': "You are the devil's advocate. Challenge assumptions, point out weaknesses, and argue alternative perspectives even if unpopular.",
            'fact_checker': "You are the fact checker. Focus on verifying claims, checking accuracy, and identifying potential misinformation.",
            'synthesizer': "You are the synthesizer. Focus on finding common ground, combining different perspectives, and building bridges between opposing views.",
            'domain_expert': "You are a domain expert. Provide specialized knowledge, technical insights, and authoritative perspective on the topic.",
            'creative_thinker': "You are the creative thinker. Approach problems from unusual angles, suggest innovative solutions, and think outside conventional boundaries."
        }

    def update_visual_state(self, state_update: Dict[str, Any]):
        """Forward a roundtable state dict to the UI callback, if one is set."""
        if self.update_callback:
            self.update_callback(state_update)

    def call_model(self, model: str, prompt: str, context: str = "") -> Optional[str]:
        """Dispatch a prompt to the named model; returns the reply text or None.

        The pseudo-model 'search' is routed to the web search agent instead
        of an LLM API. Unavailable models and API errors both yield None.
        """
        if model == 'search':
            search_query = self._extract_search_query(prompt)
            return self.search_agent.search(search_query)

        if not self.models[model]['available']:
            return None

        full_prompt = f"{context}\n\n{prompt}" if context else prompt

        try:
            if model == 'mistral':
                return self._call_mistral(full_prompt)
            elif model.startswith('sambanova_'):
                return self._call_sambanova(model, full_prompt)
            # Unknown model keys fall through and return None implicitly.
        except Exception as e:
            print(f"Error calling {model}: {str(e)}")
            return None

    def _extract_search_query(self, prompt: str) -> str:
        """Pull a usable search query out of a discussion prompt.

        Prefers the line tagged 'QUESTION:', then the first non-trivial
        line (>10 chars), then the prompt's first 100 characters.
        """
        lines = prompt.split('\n')
        for line in lines:
            if 'QUESTION:' in line:
                return line.replace('QUESTION:', '').strip()

        for line in lines:
            if len(line.strip()) > 10:
                return line.strip()[:100]

        return prompt[:100]

    def _call_sambanova(self, model: str, prompt: str) -> Optional[str]:
        """Call a SambaNova-hosted model through its OpenAI-compatible API."""
        global SAMBANOVA_API_KEY
        if not SAMBANOVA_API_KEY:
            return None

        try:
            from openai import OpenAI

            client = OpenAI(
                base_url="https://api.sambanova.ai/v1",
                api_key=SAMBANOVA_API_KEY
            )

            # Internal model keys -> SambaNova model identifiers.
            model_mapping = {
                'sambanova_deepseek': 'DeepSeek-R1',
                'sambanova_llama': 'Meta-Llama-3.1-8B-Instruct',
                'sambanova_qwq': 'QwQ-32B'
            }

            sambanova_model = model_mapping.get(model, 'Meta-Llama-3.1-8B-Instruct')

            completion = client.chat.completions.create(
                model=sambanova_model,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=2000,
                temperature=0.7
            )

            return completion.choices[0].message.content

        except Exception as e:
            print(f"Error calling Sambanova {model}: {str(e)}")
            return None

    def _call_mistral(self, prompt: str) -> Optional[str]:
        """Call Mistral Large through Mistral's OpenAI-compatible endpoint."""
        global MISTRAL_API_KEY
        if not MISTRAL_API_KEY:
            return None

        try:
            from openai import OpenAI

            client = OpenAI(
                base_url="https://api.mistral.ai/v1",
                api_key=MISTRAL_API_KEY
            )

            completion = client.chat.completions.create(
                model='mistral-large-latest',
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=2000,
                temperature=0.7
            )

            return completion.choices[0].message.content

        except Exception as e:
            print(f"Error calling Mistral API mistral-large-latest: {str(e)}")
            return None

    def assign_roles(self, models: List[str], role_assignment: str) -> Dict[str, str]:
        """Map each model key to a persona role per the chosen strategy.

        Unknown strategies behave like "none" after padding: every model
        ends up "standard". Extra models cycle through the role list.
        """
        if role_assignment == "none":
            return {model: "standard" for model in models}

        roles_to_assign = []
        if role_assignment == "balanced":
            roles_to_assign = ["devils_advocate", "fact_checker", "synthesizer", "standard"]
        elif role_assignment == "specialized":
            roles_to_assign = ["domain_expert", "fact_checker", "creative_thinker", "synthesizer"]
        elif role_assignment == "adversarial":
            roles_to_assign = ["devils_advocate", "devils_advocate", "standard", "standard"]

        # Pad with "standard" so every model gets a role.
        while len(roles_to_assign) < len(models):
            roles_to_assign.append("standard")

        model_roles = {}
        for i, model in enumerate(models):
            model_roles[model] = roles_to_assign[i % len(roles_to_assign)]

        return model_roles

    def _extract_confidence(self, response: str) -> float:
        """Parse a trailing 'Confidence: X/10' score; defaults to 5.0."""
        confidence_match = re.search(r'Confidence:\s*(\d+(?:\.\d+)?)', response)
        if confidence_match:
            try:
                return float(confidence_match.group(1))
            except ValueError:
                pass
        return 5.0

    def run_visual_consensus(self, question: str, discussion_rounds: int = 3,
                           decision_protocol: str = "consensus", role_assignment: str = "balanced",
                           topology: str = "full_mesh", moderator_model: str = "mistral",
                           enable_step_by_step: bool = False):
        """Run the full three-phase discussion, streaming visual updates.

        Returns the moderator's consensus analysis text. In step-by-step
        mode the thread blocks on ``step_continue_event`` after each reply
        until the UI's Next Step button sets it.
        NOTE(review): the ``topology`` and ``moderator_model`` parameters are
        not read inside this method (the moderator comes from self) — confirm
        whether that is intentional.
        """

        available_models = [model for model, info in self.models.items() if info['available']]
        if not available_models:
            return "❌ No AI models available"

        model_roles = self.assign_roles(available_models, role_assignment)
        participant_names = [self.models[model]['name'] for model in available_models]

        # Log the start
        log_discussion_event('phase', content=f"🚀 Starting Discussion: {question}")
        log_discussion_event('phase', content=f"📊 Configuration: {len(available_models)} models, {decision_protocol} protocol, {role_assignment} roles")

        # Initialize visual state
        self.update_visual_state({
            "participants": participant_names,
            "messages": [],
            "currentSpeaker": None,
            "thinking": [],
            "showBubbles": []
        })

        all_messages = []

        # Phase 1: Initial responses
        log_discussion_event('phase', content="📝 Phase 1: Initial Responses")

        for model in available_models:
            # Log and set thinking state
            log_discussion_event('thinking', speaker=self.models[model]['name'])
            self.update_visual_state({
                "participants": participant_names,
                "messages": all_messages,
                "currentSpeaker": None,
                "thinking": [self.models[model]['name']]
            })

            # No pause before thinking - let AI think immediately
            if not enable_step_by_step:
                time.sleep(1)

            role = model_roles[model]
            role_context = self.roles[role]

            prompt = f"""{role_context}

QUESTION: {question}

Please provide your initial analysis and answer. Be thoughtful, detailed, and explain your reasoning.

Your response should include:
1. Your direct answer to the question
2. Your reasoning and evidence
3. Any important considerations or nuances
4. END YOUR RESPONSE WITH: "Confidence: X/10" where X is your confidence level"""

            # Log and set speaking state
            log_discussion_event('speaking', speaker=self.models[model]['name'])
            self.update_visual_state({
                "participants": participant_names,
                "messages": all_messages,
                "currentSpeaker": self.models[model]['name'],
                "thinking": []
            })

            # No pause before speaking - let AI respond immediately
            if not enable_step_by_step:
                time.sleep(2)

            response = self.call_model(model, prompt)

            if response:
                confidence = self._extract_confidence(response)
                message = {
                    "speaker": self.models[model]['name'],
                    "text": response,  # CHANGE: Don't truncate the response
                    "confidence": confidence,
                    "role": role
                }
                all_messages.append(message)

                # Log the full response
                log_discussion_event('message',
                                   speaker=self.models[model]['name'],
                                   content=response,
                                   role=role,
                                   confidence=confidence)

                # Update with new message - add to showBubbles so bubble stays visible
                responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))

                self.update_visual_state({
                    "participants": participant_names,
                    "messages": all_messages,
                    "currentSpeaker": None,
                    "thinking": [],
                    "showBubbles": responded_speakers  # Keep bubbles visible for all who responded
                })

            # PAUSE AFTER AI RESPONSE - this is when user can read the response
            if enable_step_by_step:
                step_continue_event.clear()
                step_continue_event.wait()  # Wait for user to click Next Step
            else:
                time.sleep(0.5)

        # Phase 2: Discussion rounds
        if discussion_rounds > 0:
            log_discussion_event('phase', content=f"💬 Phase 2: Discussion Rounds ({discussion_rounds} rounds)")

            for round_num in range(discussion_rounds):
                log_discussion_event('phase', content=f"🔄 Discussion Round {round_num + 1}")

                for model in available_models:
                    # Log and set thinking state
                    log_discussion_event('thinking', speaker=self.models[model]['name'])
                    self.update_visual_state({
                        "participants": participant_names,
                        "messages": all_messages,
                        "currentSpeaker": None,
                        "thinking": [self.models[model]['name']]
                    })

                    # No pause before thinking
                    if not enable_step_by_step:
                        time.sleep(1)

                    # Create context of other responses.
                    # NOTE(review): only a "[Previous response]" placeholder is
                    # inserted here, not the actual text of the other models'
                    # answers — confirm whether real content was intended.
                    other_responses = ""
                    for other_model in available_models:
                        if other_model != model:
                            other_responses += f"\n**{self.models[other_model]['name']}**: [Previous response]\n"

                    discussion_prompt = f"""CONTINUING DISCUSSION FOR: {question}

Round {round_num + 1} of {discussion_rounds}

Other models' current responses:
{other_responses}

Please provide your updated analysis considering the discussion so far.
END WITH: "Confidence: X/10" """

                    # Log and set speaking state
                    log_discussion_event('speaking', speaker=self.models[model]['name'])
                    self.update_visual_state({
                        "participants": participant_names,
                        "messages": all_messages,
                        "currentSpeaker": self.models[model]['name'],
                        "thinking": []
                    })

                    # No pause before speaking
                    if not enable_step_by_step:
                        time.sleep(2)

                    response = self.call_model(model, discussion_prompt)

                    if response:
                        confidence = self._extract_confidence(response)
                        message = {
                            "speaker": self.models[model]['name'],
                            "text": f"Round {round_num + 1}: {response}",  # CHANGE: Don't truncate
                            "confidence": confidence,
                            "role": model_roles[model]
                        }
                        all_messages.append(message)

                        # Log the full response
                        log_discussion_event('message',
                                           speaker=self.models[model]['name'],
                                           content=f"Round {round_num + 1}: {response}",
                                           role=model_roles[model],
                                           confidence=confidence)

                        # Update with new message - add to showBubbles so bubble stays visible
                        responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))

                        self.update_visual_state({
                            "participants": participant_names,
                            "messages": all_messages,
                            "currentSpeaker": None,
                            "thinking": [],
                            "showBubbles": responded_speakers  # Keep bubbles visible for all who responded
                        })

                    # PAUSE AFTER AI RESPONSE for step-by-step mode
                    if enable_step_by_step:
                        step_continue_event.clear()
                        step_continue_event.wait()
                    else:
                        time.sleep(1)

        # Phase 3: Final consensus - ACTUALLY GENERATE THE CONSENSUS
        log_discussion_event('phase', content=f"🎯 Phase 3: Final Consensus ({decision_protocol})")
        log_discussion_event('thinking', speaker="All participants", content="Building consensus...")

        self.update_visual_state({
            "participants": participant_names,
            "messages": all_messages,
            "currentSpeaker": None,
            "thinking": participant_names  # Everyone thinking about consensus
        })

        # No pause before consensus generation
        if not enable_step_by_step:
            time.sleep(2)

        # ACTUALLY GENERATE THE FINAL CONSENSUS ANSWER
        # NOTE(review): raises KeyError if self.moderator_model is not a key
        # of self.models — confirm upstream validation.
        moderator = self.moderator_model if self.models[self.moderator_model]['available'] else available_models[0]

        # Collect all the actual responses for synthesis (reads the
        # module-level discussion_log populated by log_discussion_event).
        all_responses = ""
        confidence_scores = []
        for entry in discussion_log:
            if entry['type'] == 'message' and entry['speaker'] != 'Consilium':
                all_responses += f"\n**{entry['speaker']}**: {entry['content']}\n"
                if 'confidence' in entry:
                    confidence_scores.append(entry['confidence'])

        # Calculate average confidence to assess consensus likelihood
        avg_confidence = sum(confidence_scores) / len(confidence_scores) if confidence_scores else 5.0
        consensus_threshold = 7.0  # If average confidence is below this, flag potential disagreement

        consensus_prompt = f"""You are synthesizing the final result from this AI discussion.

ORIGINAL QUESTION: {question}

ALL PARTICIPANT RESPONSES:
{all_responses}

AVERAGE CONFIDENCE LEVEL: {avg_confidence:.1f}/10

Your task:
1. Analyze if the participants reached genuine consensus or if there are significant disagreements
2. If there IS consensus: Provide a comprehensive final answer incorporating all insights
3. If there is NO consensus: Clearly state the disagreements and present the main conflicting positions
4. If partially aligned: Identify areas of agreement and areas of disagreement

Be honest about the level of consensus achieved. Do not force agreement where none exists.

Format your response as:
**CONSENSUS STATUS:** [Reached/Partial/Not Reached]

**FINAL ANSWER:** [Your synthesis]

**AREAS OF DISAGREEMENT:** [If any - explain the key points of contention]"""

        log_discussion_event('speaking', speaker="Consilium", content="Analyzing consensus and synthesizing final answer...")
        self.update_visual_state({
            "participants": participant_names,
            "messages": all_messages,
            "currentSpeaker": "Consilium",
            "thinking": []
        })

        # Generate the actual consensus analysis
        consensus_result = self.call_model(moderator, consensus_prompt)

        if not consensus_result:
            consensus_result = f"""**CONSENSUS STATUS:** Analysis Failed

**FINAL ANSWER:** Unable to generate consensus analysis. Please review individual participant responses in the discussion log.

**AREAS OF DISAGREEMENT:** Analysis could not be completed due to technical issues."""

        # Check if consensus was actually reached based on the response.
        # NOTE(review): the prompt asks for "**CONSENSUS STATUS:**" (bold
        # markdown), so this plain substring may never match and the check
        # would then rest solely on avg_confidence — confirm.
        consensus_reached = "CONSENSUS STATUS: Reached" in consensus_result or avg_confidence >= consensus_threshold

        # Generate final consensus message for visual
        if consensus_reached:
            visual_summary = "✅ Consensus reached!"
        elif "Partial" in consensus_result:
            visual_summary = "⚠️ Partial consensus - some disagreements remain"
        else:
            visual_summary = "❌ No consensus - significant disagreements identified"

        final_message = {
            "speaker": "Consilium",
            "text": f"{visual_summary} {consensus_result}",  # CHANGE: Don't truncate consensus
            "confidence": avg_confidence,
            "role": "consensus"
        }
        all_messages.append(final_message)

        log_discussion_event('message',
                           speaker="Consilium",
                           content=consensus_result,
                           confidence=avg_confidence)

        # Final state - show bubbles for all who responded
        responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))

        self.update_visual_state({
            "participants": participant_names,
            "messages": all_messages,
            "currentSpeaker": None,
            "thinking": [],
            "showBubbles": responded_speakers
        })

        log_discussion_event('phase', content="✅ Discussion Complete")

        return consensus_result  # Return the actual analysis, including disagreements
580
+
581
# Global state for the visual component: the single source of truth the
# roundtable widget is re-rendered from.
current_roundtable_state = {
    "participants": [],
    "messages": [],
    "currentSpeaker": None,
    "thinking": [],
    "showBubbles": []
}

def update_roundtable_state(new_state):
    """Merge *new_state* into the shared roundtable state.

    Returns the merged state serialized as a JSON string, ready to hand to
    the visual component.
    """
    global current_roundtable_state
    current_roundtable_state |= new_state
    return json.dumps(current_roundtable_state)
595
+
596
# Global synchronization primitives for step-by-step mode.
# step_pause_queue: NOTE(review) — appears unused in this file; pausing is
# implemented with the Event below. Confirm before removing.
step_pause_queue = queue.Queue()
# Cleared before each pause; set by continue_step() to resume the discussion thread.
step_continue_event = threading.Event()
599
+
600
def run_consensus_discussion(question: str, discussion_rounds: int = 3,
                           decision_protocol: str = "consensus", role_assignment: str = "balanced",
                           topology: str = "full_mesh", moderator_model: str = "mistral",
                           enable_step_by_step: bool = False):
    """Top-level driver wired to the Start button.

    Resets the shared discussion state, runs a VisualConsensusEngine
    discussion (pushing live updates into current_roundtable_state), and
    returns a 4-tuple for the UI outputs:
    (status text, roundtable state JSON, final answer markdown, full log).
    """

    global discussion_log, final_answer, step_by_step_active, step_continue_event
    discussion_log = []  # Reset log
    final_answer = ""
    step_by_step_active = enable_step_by_step
    step_continue_event.clear()  # make sure a stale "next step" signal can't skip the first pause

    def visual_update_callback(state_update):
        """Callback to update visual state during discussion"""
        update_roundtable_state(state_update)

    engine = VisualConsensusEngine(moderator_model, visual_update_callback)
    result = engine.run_visual_consensus(
        question, discussion_rounds, decision_protocol,
        role_assignment, topology, moderator_model, enable_step_by_step
    )

    # Generate final answer summary
    available_models = [model for model, info in engine.models.items() if info['available']]
    final_answer = f"""## 🎯 Final Consensus Answer

{result}

---

### 📊 Discussion Summary
- **Question:** {question}
- **Protocol:** {decision_protocol.replace('_', ' ').title()}
- **Participants:** {len(available_models)} AI models
- **Roles:** {role_assignment.title()}
- **Communication:** {topology.replace('_', ' ').title()}
- **Rounds:** {discussion_rounds}

*Generated by Consilium Visual AI Consensus Platform*"""

    step_by_step_active = False  # Reset after discussion

    # Return ONLY status for the status field, not the full result
    status_text = "✅ Discussion Complete - See results below"
    return status_text, json.dumps(current_roundtable_state), final_answer, format_discussion_log()
645
+
646
def continue_step():
    """Handler for the Next Step button.

    Releases the discussion thread blocked on step_continue_event.wait()
    and returns a short status string for the step-control textbox.
    """
    global step_continue_event
    step_continue_event.set()
    return "✅ Continuing... Next AI will respond shortly"
651
+
652
# Module-level discussion state shared between the engine, the UI handlers,
# and the step-by-step controls.
discussion_log = []          # chronological event dicts appended by log_discussion_event()
final_answer = ""            # markdown summary built by run_consensus_discussion()
step_by_step_active = False  # True while a step-by-step discussion is running
current_step_data = {}       # NOTE(review): appears unused in this file — confirm
step_callback = None         # optional callback consulted by wait_for_next_step()
658
+
659
def set_step_callback(callback):
    """Register the step-by-step callback in module state."""
    global step_callback
    step_callback = callback
663
+
664
def wait_for_next_step():
    """Report whether the discussion should pause for a 'Next Step' click.

    True only when step-by-step mode is active and a callback has been
    registered; False lets the discussion proceed automatically.
    """
    global step_by_step_active
    if not (step_by_step_active and step_callback):
        return False
    # Return control to UI - the next step button will continue
    return True
671
+
672
def format_discussion_log():
    """Render the shared discussion log as a markdown transcript."""
    if not discussion_log:
        return "No discussion log available yet."

    parts = ["# 🎭 Complete Discussion Log\n\n"]

    for entry in discussion_log:
        when = entry.get('timestamp', datetime.now().strftime('%H:%M:%S'))
        kind = entry['type']
        if kind == 'thinking':
            parts.append(f"**{when}** 🤔 **{entry['speaker']}** is thinking...\n\n")
        elif kind == 'speaking':
            parts.append(f"**{when}** 💬 **{entry['speaker']}** is responding...\n\n")
        elif kind == 'message':
            parts.append(f"**{when}** ✅ **{entry['speaker']}** ({entry.get('role', 'standard')}):\n")
            parts.append(f"> {entry['content']}\n")
            # Messages with a confidence score get a trailing italic line;
            # otherwise just close the entry with a blank line.
            if 'confidence' in entry:
                parts.append(f"*Confidence: {entry['confidence']}/10*\n\n")
            else:
                parts.append("\n")
        elif kind == 'phase':
            parts.append(f"\n---\n## {entry['content']}\n---\n\n")

    return "".join(parts)
696
+
697
def log_discussion_event(event_type: str, speaker: str = "", content: str = "", **kwargs):
    """Append one timestamped event dict to the shared discussion log.

    Extra keyword arguments (e.g. role=, confidence=) are merged into the
    entry and may override the base fields.
    """
    global discussion_log
    entry = {
        'type': event_type,
        'speaker': speaker,
        'content': content,
        'timestamp': datetime.now().strftime('%H:%M:%S'),
    }
    entry.update(kwargs)
    discussion_log.append(entry)
707
+
708
def update_api_keys(mistral_key, sambanova_key):
    """Apply API keys entered in the UI, falling back to environment values.

    Rebinds the module-level key globals when a non-blank value is supplied
    and returns a ' | '-joined status string (or a single error string when
    no key is available at all).
    """
    global MISTRAL_API_KEY, SAMBANOVA_API_KEY

    notes = []

    # Mistral: UI value wins; otherwise keep whatever the environment provided.
    mistral_key = mistral_key.strip()
    if mistral_key:
        MISTRAL_API_KEY = mistral_key
        notes.append("✅ Mistral API key updated")
    elif MISTRAL_API_KEY:
        notes.append("✅ Using Mistral API key from environment")
    else:
        notes.append("❌ No Mistral API key (env or input)")

    # SambaNova: same precedence rules.
    sambanova_key = sambanova_key.strip()
    if sambanova_key:
        SAMBANOVA_API_KEY = sambanova_key
        notes.append("✅ SambaNova API key updated")
    elif SAMBANOVA_API_KEY:
        notes.append("✅ Using SambaNova API key from environment")
    else:
        notes.append("❌ No SambaNova API key (env or input)")

    # At least one provider must be usable.
    if not MISTRAL_API_KEY and not SAMBANOVA_API_KEY:
        return "❌ ERROR: No API keys available! Please provide at least one API key."

    return " | ".join(notes)
737
+
738
def check_model_status():
    """Build a markdown report of which models are currently usable."""
    global MISTRAL_API_KEY, SAMBANOVA_API_KEY

    # Display name -> credential (the search agent needs no key).
    lineup = [
        ('Mistral Large', MISTRAL_API_KEY),
        ('DeepSeek-R1', SAMBANOVA_API_KEY),
        ('Meta-Llama-3.1-8B', SAMBANOVA_API_KEY),
        ('QwQ-32B', SAMBANOVA_API_KEY),
        ('Web Search Agent', True),
    ]

    lines = ["## 🔍 Model Availability Status\n\n"]
    for model_name, available in lineup:
        if model_name == 'Web Search Agent':
            status = "✅ Available (Built-in)"
        elif available:
            status = "✅ Available"
        else:
            status = "❌ Not configured"
        lines.append(f"**{model_name}:** {status}\n\n")

    return "".join(lines)
760
+
761
+ # Create the hybrid interface
762
+ with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.themes.Soft()) as demo:
763
+ gr.Markdown("""
764
+ # ๐ŸŽญ Consilium: Visual AI Consensus Platform
765
+
766
+ **Watch AI models collaborate in real-time around a visual roundtable!**
767
+
768
+ This platform combines:
769
+ - ๐ŸŽจ **Visual Roundtable Interface** - See AI avatars thinking and speaking
770
+ - ๐Ÿค– **Multi-Model Consensus** - Mistral, Deepseek, Llama, QwQ
771
+ - ๐ŸŽญ **Dynamic Role Assignment** - Devil's advocate, fact checker, synthesizer roles
772
+ - ๐ŸŒ **Communication Topologies** - Full mesh, star, ring patterns
773
+ - ๐Ÿ—ณ๏ธ **Decision Protocols** - Consensus, voting, weighted, ranked choice
774
+ - ๐Ÿ” **Web Search Integration** - Real-time information gathering
775
+
776
+ **Perfect for:** Complex decisions, research analysis, creative brainstorming, problem-solving
777
+ """)
778
+
779
    with gr.Tab("🎭 Visual Consensus Discussion"):
        with gr.Row():
            # Left column: all discussion configuration controls.
            with gr.Column(scale=1):
                question_input = gr.Textbox(
                    label="Discussion Question",
                    placeholder="What would you like the AI council to discuss and decide?",
                    lines=3,
                    value="What are the most effective strategies for combating climate change?"
                )
                
                with gr.Row():
                    # Choice values must match the protocol names understood by
                    # the discussion engine (defined elsewhere in this file).
                    decision_protocol = gr.Dropdown(
                        choices=["consensus", "majority_voting", "weighted_voting", "ranked_choice", "unanimity"],
                        value="consensus",
                        label="🗳️ Decision Protocol"
                    )
                    
                    role_assignment = gr.Dropdown(
                        choices=["balanced", "specialized", "adversarial", "none"],
                        value="balanced",
                        label="🎭 Role Assignment"
                    )
                
                with gr.Row():
                    topology = gr.Dropdown(
                        choices=["full_mesh", "star", "ring"],
                        value="full_mesh",
                        label="🌐 Communication Pattern"
                    )
                    
                    moderator_model = gr.Dropdown(
                        choices=["mistral", "sambanova_deepseek", "sambanova_llama", "sambanova_qwq"],
                        value="mistral",
                        label="👨‍⚖️ Moderator"
                    )
                
                rounds_input = gr.Slider(
                    minimum=1, maximum=5, value=2, step=1,
                    label="🔄 Discussion Rounds"
                )
                
                enable_clickthrough = gr.Checkbox(
                    label="⏯️ Enable Step-by-Step Mode",
                    value=False,
                    info="Pause at each step for manual control"
                )
                
                start_btn = gr.Button("🚀 Start Visual Consensus Discussion", variant="primary", size="lg")
                
                # Step-by-step control button (only visible when step mode is active)
                next_step_btn = gr.Button("⏯️ Next Step", variant="secondary", size="lg", visible=False)
                step_status = gr.Textbox(label="Step Control", visible=False, interactive=False)
                
                status_output = gr.Textbox(label="📊 Discussion Status", interactive=False)
            
            # Right column: the custom roundtable visualization component.
            with gr.Column(scale=2):
                # The visual roundtable component (gradio-consilium-roundtable).
                # NOTE(review): `current_roundtable_state` appears to be a
                # module-level dict holding the live discussion state — confirm
                # against its definition earlier in this file.
                roundtable = consilium_roundtable(
                    label="🎭 AI Consensus Roundtable",
                    value=json.dumps(current_roundtable_state)
                )
        
        # Final answer section
        with gr.Row():
            final_answer_output = gr.Markdown(
                label="🎯 Final Consensus Answer",
                value="*Discussion results will appear here...*"
            )
        
        # Collapsible discussion log
        with gr.Accordion("📋 Complete Discussion Log", open=False):
            discussion_log_output = gr.Markdown(
                value="*Complete discussion transcript will appear here...*"
            )
854
+ # Event handlers
855
+ def on_start_discussion(*args):
856
+ # Start discussion immediately for both modes
857
+ enable_step = args[-1] # Last argument is enable_step_by_step
858
+
859
+ if enable_step:
860
+ # Step-by-step mode: Start discussion in background thread
861
+ def run_discussion():
862
+ run_consensus_discussion(*args)
863
+
864
+ discussion_thread = threading.Thread(target=run_discussion)
865
+ discussion_thread.daemon = True
866
+ discussion_thread.start()
867
+
868
+ return (
869
+ "๐ŸŽฌ Step-by-step mode: Discussion started - will pause after each AI response",
870
+ json.dumps(current_roundtable_state),
871
+ "*Discussion starting in step-by-step mode...*",
872
+ "*Discussion log will appear here...*",
873
+ gr.update(visible=True), # Show next step button
874
+ gr.update(visible=True, value="Discussion running - will pause after first AI response") # Show step status
875
+ )
876
+ else:
877
+ # Normal mode - start immediately and hide step controls
878
+ result = run_consensus_discussion(*args)
879
+ return result + (gr.update(visible=False), gr.update(visible=False))
880
+
881
+ # Function to toggle step controls visibility
882
+ def toggle_step_controls(enable_step):
883
+ return (
884
+ gr.update(visible=enable_step), # next_step_btn
885
+ gr.update(visible=enable_step) # step_status
886
+ )
887
+
888
        # Hide/show step controls when checkbox changes
        enable_clickthrough.change(
            toggle_step_controls,
            inputs=[enable_clickthrough],
            outputs=[next_step_btn, step_status]
        )
        
        # Start button: input order here must match the *args order consumed
        # by on_start_discussion (enable_clickthrough must stay last).
        start_btn.click(
            on_start_discussion,
            inputs=[question_input, rounds_input, decision_protocol, role_assignment, topology, moderator_model, enable_clickthrough],
            outputs=[status_output, roundtable, final_answer_output, discussion_log_output, next_step_btn, step_status]
        )
        
        # Next step button handler.
        # NOTE(review): `continue_step` is defined elsewhere in this file;
        # presumably it releases the pause taken after each AI response — verify.
        next_step_btn.click(
            continue_step,
            outputs=[step_status]
        )
        
        # Auto-refresh the roundtable state every 2 seconds during discussion,
        # so background-thread updates to current_roundtable_state become visible.
        gr.Timer(2).tick(lambda: json.dumps(current_roundtable_state), outputs=[roundtable])
909
+
910
    with gr.Tab("🔧 Configuration & Setup"):
        gr.Markdown("## 🔑 API Keys Configuration")
        gr.Markdown("*Enter your API keys below OR set them as environment variables*")
        
        with gr.Row():
            with gr.Column():
                # Keys are entered as passwords; they are only applied when the
                # save button below invokes update_api_keys.
                mistral_key_input = gr.Textbox(
                    label="Mistral API Key",
                    placeholder="Enter your Mistral API key...",
                    type="password",
                    info="Required for Mistral Large model"
                )
                sambanova_key_input = gr.Textbox(
                    label="SambaNova API Key",
                    placeholder="Enter your SambaNova API key...",
                    type="password",
                    info="Required for DeepSeek, Llama, and QwQ models"
                )
            
            with gr.Column():
                # Add a button to save/update keys
                save_keys_btn = gr.Button("💾 Save API Keys", variant="secondary")
                keys_status = gr.Textbox(
                    label="Keys Status",
                    value="No API keys configured - using environment variables if available",
                    interactive=False
                )
        
        # Connect the save button (update_api_keys is defined earlier in this file).
        save_keys_btn.click(
            update_api_keys,
            inputs=[mistral_key_input, sambanova_key_input],
            outputs=[keys_status]
        )
        
        # Snapshot of model availability at page build time.
        model_status_display = gr.Markdown(check_model_status())
        
        # Add refresh button for model status
        refresh_status_btn = gr.Button("🔄 Refresh Model Status")
        refresh_status_btn.click(
            check_model_status,
            outputs=[model_status_display]
        )
        
        gr.Markdown("""
        ## 🛠️ Setup Instructions
        
        ### 🚀 Quick Start (Recommended)
        1. **Enter API keys above** (they'll be used for this session)
        2. **Click "Save API Keys"**
        3. **Start a discussion!**
        
        ### 🔑 Get API Keys:
        - **Mistral:** [console.mistral.ai](https://console.mistral.ai)
        - **SambaNova:** [cloud.sambanova.ai](https://cloud.sambanova.ai)
        
        ### 🌐 Alternative: Environment Variables
        ```bash
        export MISTRAL_API_KEY=your_key_here
        export SAMBANOVA_API_KEY=your_key_here
        export MODERATOR_MODEL=mistral
        ```
        
        ### 🦙 Sambanova Integration
        The platform includes **3 Sambanova models**:
        - **DeepSeek-R1**: Advanced reasoning model
        - **Meta-Llama-3.1-8B**: Fast, efficient discussions
        - **QwQ-32B**: Large-scale consensus analysis
        
        ### 🔍 Web Search Agent
        Built-in agent using **smolagents** with:
        - **DuckDuckGoSearchTool**: Web searches
        - **VisitWebpageTool**: Deep content analysis
        - **WikipediaTool**: Comprehensive research
        - **TinyLlama**: Fast inference for search synthesis
        
        ### 📋 Dependencies
        ```bash
        pip install gradio requests python-dotenv smolagents gradio-consilium-roundtable wikipedia openai
        ```
        
        ### 🔗 MCP Integration
        Add to your Claude Desktop config:
        ```json
        {
          "mcpServers": {
            "consilium": {
              "command": "npx",
              "args": ["mcp-remote", "http://localhost:7860/gradio_api/mcp/sse"]
            }
          }
        }
        ```
        """)
1004
+
1005
    # Static documentation tab: example topics and a legend for the visuals.
    with gr.Tab("📚 Usage Examples"):
        gr.Markdown("""
        ## 🎯 Example Discussion Topics
        
        ### 🧠 Complex Problem Solving
        - "How should we approach the global housing crisis?"
        - "What's the best strategy for reducing plastic pollution?"
        - "How can we make AI development more democratic?"
        
        ### 💼 Business Strategy
        - "Should our company invest in quantum computing research?"
        - "What's the optimal remote work policy for productivity?"
        - "How should startups approach AI integration?"
        
        ### 🔬 Technical Analysis
        - "What's the future of web development frameworks?"
        - "How should we handle data privacy in the age of AI?"
        - "What are the best practices for microservices architecture?"
        
        ### 🌐 Social Issues
        - "How can we bridge political divides in society?"
        - "What's the most effective approach to education reform?"
        - "How should we regulate social media platforms?"
        
        ## 🎭 Visual Features
        
        **Watch for these visual cues:**
        - 🤔 **Orange pulsing avatars** = AI is thinking
        - ✨ **Gold glowing avatars** = AI is responding
        - 💬 **Speech bubbles** = Click avatars to see messages
        - 🎯 **Center consensus** = Final decision reached
        
        **The roundtable updates in real-time as the discussion progresses!**
        
        ## 🎮 Role Assignments Explained
        
        ### 🎭 Balanced (Recommended)
        - **Devil's Advocate**: Challenges assumptions
        - **Fact Checker**: Verifies claims and accuracy
        - **Synthesizer**: Finds common ground
        - **Standard**: Provides balanced analysis
        
        ### 🎓 Specialized
        - **Domain Expert**: Technical expertise
        - **Fact Checker**: Accuracy verification
        - **Creative Thinker**: Innovative solutions
        - **Synthesizer**: Bridge building
        
        ### ⚔️ Adversarial
        - **Double Devil's Advocate**: Maximum challenge
        - **Standard**: Balanced counter-perspective
        
        ## 🗳️ Decision Protocols
        
        - **Consensus**: Seek agreement among all participants
        - **Majority Voting**: Most popular position wins
        - **Weighted Voting**: Higher confidence scores matter more
        - **Ranked Choice**: Preference-based selection
        - **Unanimity**: All must agree completely
        """)
1065
+
1066
# Launch configuration
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",   # bind on all interfaces (container / HF Spaces friendly)
        server_port=7860,        # default Gradio / HF Spaces port
        share=False,
        debug=False,
        mcp_server=True          # expose the app as an MCP server (requires gradio[mcp])
    )
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio>=5.0.0
2
+ gradio[mcp]
3
+ smolagents
4
+ markdownify
5
+ requests
6
+ python-dotenv
7
+ duckduckgo-search
8
+ wikipedia-api
9
+ gradio-consilium-roundtable
10
+ openai