azettl commited on
Commit
b3f93dc
ยท
1 Parent(s): 46fb991
Files changed (1) hide show
  1. app.py +446 -239
app.py CHANGED
@@ -11,17 +11,22 @@ import re
11
  from collections import Counter
12
  import threading
13
  import queue
 
14
  from gradio_consilium_roundtable import consilium_roundtable
15
  from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, VisitWebpageTool, Tool
16
 
17
  # Load environment variables
18
  load_dotenv()
19
 
20
- # API Configuration
21
  MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
22
  SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY")
 
23
  MODERATOR_MODEL = os.getenv("MODERATOR_MODEL", "mistral")
24
 
 
 
 
25
  class WikipediaTool(Tool):
26
  name = "wikipedia_search"
27
  description = "Search Wikipedia for comprehensive information on any topic"
@@ -46,35 +51,32 @@ class WikipediaTool(Tool):
46
 
47
  class WebSearchAgent:
48
  def __init__(self):
49
- self.agent = CodeAgent(
50
- tools=[
51
- DuckDuckGoSearchTool(),
52
- VisitWebpageTool(),
53
- WikipediaTool(),
54
- FinalAnswerTool()
55
- ],
56
- model=InferenceClientModel(),
57
- max_steps=5,
58
- verbosity_level=1
59
- )
 
 
 
 
 
60
 
61
  def search(self, query: str, max_results: int = 5) -> str:
62
  """Use the CodeAgent to perform comprehensive web search and analysis"""
 
 
 
63
  try:
64
- # Create a detailed prompt for the agent
65
- agent_prompt = f"""You are a web research agent. Please research the following query comprehensively:
66
-
67
- "{query}"
68
-
69
- Your task:
70
- 1. Search for relevant information using DuckDuckGo or Wikipedia
71
- 2. Visit the most promising web pages to get detailed information
72
- 3. Synthesize the findings into a comprehensive, well-formatted response
73
- 4. Include sources and links where appropriate
74
- 5. Format your response with markdown for better readability
75
-
76
- Please provide a thorough analysis based on current, reliable information."""
77
-
78
  # Run the agent
79
  result = self.agent.run(agent_prompt)
80
 
@@ -82,38 +84,115 @@ Please provide a thorough analysis based on current, reliable information."""
82
  if result:
83
  return f"๐Ÿ” **Web Research Results for:** {query}\n\n{result}"
84
  else:
85
- return f"๐Ÿ” **Web Search for:** {query}\n\nNo results found or agent encountered an error."
86
 
87
  except Exception as e:
88
  # Fallback to simple error message
89
- return f"๐Ÿ” **Web Search Error for:** {query}\n\nError: {str(e)}\n\nThe search agent encountered an issue. Please try again or rephrase your query."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
  class VisualConsensusEngine:
92
- def __init__(self, moderator_model: str = None, update_callback=None):
93
  self.moderator_model = moderator_model or MODERATOR_MODEL
94
  self.search_agent = WebSearchAgent()
95
- self.update_callback = update_callback # For real-time updates
 
 
 
 
 
 
 
 
 
96
 
97
  self.models = {
98
  'mistral': {
99
  'name': 'Mistral Large',
100
- 'api_key': MISTRAL_API_KEY,
101
- 'available': bool(MISTRAL_API_KEY)
102
  },
103
  'sambanova_deepseek': {
104
  'name': 'DeepSeek-R1',
105
- 'api_key': SAMBANOVA_API_KEY,
106
- 'available': bool(SAMBANOVA_API_KEY)
107
  },
108
  'sambanova_llama': {
109
  'name': 'Meta-Llama-3.1-8B',
110
- 'api_key': SAMBANOVA_API_KEY,
111
- 'available': bool(SAMBANOVA_API_KEY)
112
  },
113
  'sambanova_qwq': {
114
  'name': 'QwQ-32B',
115
- 'api_key': SAMBANOVA_API_KEY,
116
- 'available': bool(SAMBANOVA_API_KEY)
117
  },
118
  'search': {
119
  'name': 'Web Search Agent',
@@ -122,6 +201,13 @@ class VisualConsensusEngine:
122
  }
123
  }
124
 
 
 
 
 
 
 
 
125
  # Role definitions
126
  self.roles = {
127
  'standard': "You are participating in a collaborative AI discussion. Provide thoughtful, balanced analysis.",
@@ -133,12 +219,12 @@ class VisualConsensusEngine:
133
  }
134
 
135
  def update_visual_state(self, state_update: Dict[str, Any]):
136
- """Update the visual roundtable state"""
137
  if self.update_callback:
138
  self.update_callback(state_update)
139
 
140
  def call_model(self, model: str, prompt: str, context: str = "") -> Optional[str]:
141
- """Generic model calling function"""
142
  if model == 'search':
143
  search_query = self._extract_search_query(prompt)
144
  return self.search_agent.search(search_query)
@@ -171,7 +257,8 @@ class VisualConsensusEngine:
171
  return prompt[:100]
172
 
173
  def _call_sambanova(self, model: str, prompt: str) -> Optional[str]:
174
- if not SAMBANOVA_API_KEY:
 
175
  return None
176
 
177
  try:
@@ -179,7 +266,7 @@ class VisualConsensusEngine:
179
 
180
  client = OpenAI(
181
  base_url="https://api.sambanova.ai/v1",
182
- api_key=SAMBANOVA_API_KEY
183
  )
184
 
185
  model_mapping = {
@@ -206,7 +293,8 @@ class VisualConsensusEngine:
206
  return None
207
 
208
  def _call_mistral(self, prompt: str) -> Optional[str]:
209
- if not MISTRAL_API_KEY:
 
210
  return None
211
 
212
  try:
@@ -214,7 +302,7 @@ class VisualConsensusEngine:
214
 
215
  client = OpenAI(
216
  base_url="https://api.mistral.ai/v1",
217
- api_key=MISTRAL_API_KEY
218
  )
219
 
220
  completion = client.chat.completions.create(
@@ -264,11 +352,11 @@ class VisualConsensusEngine:
264
  pass
265
  return 5.0
266
 
267
- def run_visual_consensus(self, question: str, discussion_rounds: int = 3,
268
- decision_protocol: str = "consensus", role_assignment: str = "balanced",
269
- topology: str = "full_mesh", moderator_model: str = "mistral",
270
- enable_step_by_step: bool = False):
271
- """Run consensus with visual updates"""
272
 
273
  available_models = [model for model, info in self.models.items() if info['available']]
274
  if not available_models:
@@ -277,9 +365,14 @@ class VisualConsensusEngine:
277
  model_roles = self.assign_roles(available_models, role_assignment)
278
  participant_names = [self.models[model]['name'] for model in available_models]
279
 
 
 
 
 
 
280
  # Log the start
281
- log_discussion_event('phase', content=f"๐Ÿš€ Starting Discussion: {question}")
282
- log_discussion_event('phase', content=f"๐Ÿ“Š Configuration: {len(available_models)} models, {decision_protocol} protocol, {role_assignment} roles")
283
 
284
  # Initialize visual state
285
  self.update_visual_state({
@@ -293,11 +386,11 @@ class VisualConsensusEngine:
293
  all_messages = []
294
 
295
  # Phase 1: Initial responses
296
- log_discussion_event('phase', content="๐Ÿ“ Phase 1: Initial Responses")
297
 
298
  for model in available_models:
299
  # Log and set thinking state
300
- log_discussion_event('thinking', speaker=self.models[model]['name'])
301
  self.update_visual_state({
302
  "participants": participant_names,
303
  "messages": all_messages,
@@ -305,7 +398,6 @@ class VisualConsensusEngine:
305
  "thinking": [self.models[model]['name']]
306
  })
307
 
308
- # No pause before thinking - let AI think immediately
309
  if not enable_step_by_step:
310
  time.sleep(1)
311
 
@@ -325,7 +417,7 @@ Your response should include:
325
  4. END YOUR RESPONSE WITH: "Confidence: X/10" where X is your confidence level"""
326
 
327
  # Log and set speaking state
328
- log_discussion_event('speaking', speaker=self.models[model]['name'])
329
  self.update_visual_state({
330
  "participants": participant_names,
331
  "messages": all_messages,
@@ -333,7 +425,6 @@ Your response should include:
333
  "thinking": []
334
  })
335
 
336
- # No pause before speaking - let AI respond immediately
337
  if not enable_step_by_step:
338
  time.sleep(2)
339
 
@@ -343,20 +434,20 @@ Your response should include:
343
  confidence = self._extract_confidence(response)
344
  message = {
345
  "speaker": self.models[model]['name'],
346
- "text": response, # CHANGE: Don't truncate the response
347
  "confidence": confidence,
348
  "role": role
349
  }
350
  all_messages.append(message)
351
 
352
  # Log the full response
353
- log_discussion_event('message',
354
- speaker=self.models[model]['name'],
355
- content=response,
356
- role=role,
357
- confidence=confidence)
358
 
359
- # Update with new message - add to showBubbles so bubble stays visible
360
  responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))
361
 
362
  self.update_visual_state({
@@ -364,26 +455,25 @@ Your response should include:
364
  "messages": all_messages,
365
  "currentSpeaker": None,
366
  "thinking": [],
367
- "showBubbles": responded_speakers # Keep bubbles visible for all who responded
368
  })
369
 
370
- # PAUSE AFTER AI RESPONSE - this is when user can read the response
371
  if enable_step_by_step:
372
- step_continue_event.clear()
373
- step_continue_event.wait() # Wait for user to click Next Step
 
374
  else:
375
  time.sleep(0.5)
376
 
377
  # Phase 2: Discussion rounds
378
  if discussion_rounds > 0:
379
- log_discussion_event('phase', content=f"๐Ÿ’ฌ Phase 2: Discussion Rounds ({discussion_rounds} rounds)")
380
 
381
  for round_num in range(discussion_rounds):
382
- log_discussion_event('phase', content=f"๐Ÿ”„ Discussion Round {round_num + 1}")
383
 
384
  for model in available_models:
385
- # Log and set thinking state
386
- log_discussion_event('thinking', speaker=self.models[model]['name'])
387
  self.update_visual_state({
388
  "participants": participant_names,
389
  "messages": all_messages,
@@ -391,7 +481,6 @@ Your response should include:
391
  "thinking": [self.models[model]['name']]
392
  })
393
 
394
- # No pause before thinking
395
  if not enable_step_by_step:
396
  time.sleep(1)
397
 
@@ -411,8 +500,7 @@ Other models' current responses:
411
  Please provide your updated analysis considering the discussion so far.
412
  END WITH: "Confidence: X/10" """
413
 
414
- # Log and set speaking state
415
- log_discussion_event('speaking', speaker=self.models[model]['name'])
416
  self.update_visual_state({
417
  "participants": participant_names,
418
  "messages": all_messages,
@@ -420,7 +508,6 @@ END WITH: "Confidence: X/10" """
420
  "thinking": []
421
  })
422
 
423
- # No pause before speaking
424
  if not enable_step_by_step:
425
  time.sleep(2)
426
 
@@ -430,20 +517,18 @@ END WITH: "Confidence: X/10" """
430
  confidence = self._extract_confidence(response)
431
  message = {
432
  "speaker": self.models[model]['name'],
433
- "text": f"Round {round_num + 1}: {response}", # CHANGE: Don't truncate
434
  "confidence": confidence,
435
  "role": model_roles[model]
436
  }
437
  all_messages.append(message)
438
 
439
- # Log the full response
440
- log_discussion_event('message',
441
- speaker=self.models[model]['name'],
442
- content=f"Round {round_num + 1}: {response}",
443
- role=model_roles[model],
444
- confidence=confidence)
445
 
446
- # Update with new message - add to showBubbles so bubble stays visible
447
  responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))
448
 
449
  self.update_visual_state({
@@ -451,46 +536,45 @@ END WITH: "Confidence: X/10" """
451
  "messages": all_messages,
452
  "currentSpeaker": None,
453
  "thinking": [],
454
- "showBubbles": responded_speakers # Keep bubbles visible for all who responded
455
  })
456
 
457
- # PAUSE AFTER AI RESPONSE for step-by-step mode
458
  if enable_step_by_step:
459
- step_continue_event.clear()
460
- step_continue_event.wait()
 
461
  else:
462
  time.sleep(1)
463
 
464
- # Phase 3: Final consensus - ACTUALLY GENERATE THE CONSENSUS
465
- log_discussion_event('phase', content=f"๐ŸŽฏ Phase 3: Final Consensus ({decision_protocol})")
466
- log_discussion_event('thinking', speaker="All participants", content="Building consensus...")
467
 
468
  self.update_visual_state({
469
  "participants": participant_names,
470
  "messages": all_messages,
471
  "currentSpeaker": None,
472
- "thinking": participant_names # Everyone thinking about consensus
473
  })
474
 
475
- # No pause before consensus generation
476
  if not enable_step_by_step:
477
  time.sleep(2)
478
 
479
- # ACTUALLY GENERATE THE FINAL CONSENSUS ANSWER
480
  moderator = self.moderator_model if self.models[self.moderator_model]['available'] else available_models[0]
481
 
482
- # Collect all the actual responses for synthesis
 
483
  all_responses = ""
484
  confidence_scores = []
485
- for entry in discussion_log:
486
  if entry['type'] == 'message' and entry['speaker'] != 'Consilium':
487
  all_responses += f"\n**{entry['speaker']}**: {entry['content']}\n"
488
  if 'confidence' in entry:
489
  confidence_scores.append(entry['confidence'])
490
 
491
- # Calculate average confidence to assess consensus likelihood
492
  avg_confidence = sum(confidence_scores) / len(confidence_scores) if confidence_scores else 5.0
493
- consensus_threshold = 7.0 # If average confidence is below this, flag potential disagreement
494
 
495
  consensus_prompt = f"""You are synthesizing the final result from this AI discussion.
496
 
@@ -516,7 +600,7 @@ Format your response as:
516
 
517
  **AREAS OF DISAGREEMENT:** [If any - explain the key points of contention]"""
518
 
519
- log_discussion_event('speaking', speaker="Consilium", content="Analyzing consensus and synthesizing final answer...")
520
  self.update_visual_state({
521
  "participants": participant_names,
522
  "messages": all_messages,
@@ -524,7 +608,6 @@ Format your response as:
524
  "thinking": []
525
  })
526
 
527
- # Generate the actual consensus analysis
528
  consensus_result = self.call_model(moderator, consensus_prompt)
529
 
530
  if not consensus_result:
@@ -534,10 +617,8 @@ Format your response as:
534
 
535
  **AREAS OF DISAGREEMENT:** Analysis could not be completed due to technical issues."""
536
 
537
- # Check if consensus was actually reached based on the response
538
  consensus_reached = "CONSENSUS STATUS: Reached" in consensus_result or avg_confidence >= consensus_threshold
539
 
540
- # Generate final consensus message for visual
541
  if consensus_reached:
542
  visual_summary = "โœ… Consensus reached!"
543
  elif "Partial" in consensus_result:
@@ -547,18 +628,17 @@ Format your response as:
547
 
548
  final_message = {
549
  "speaker": "Consilium",
550
- "text": f"{visual_summary} {consensus_result}", # CHANGE: Don't truncate consensus
551
  "confidence": avg_confidence,
552
  "role": "consensus"
553
  }
554
  all_messages.append(final_message)
555
 
556
- log_discussion_event('message',
557
- speaker="Consilium",
558
- content=consensus_result,
559
- confidence=avg_confidence)
560
 
561
- # Final state - show bubbles for all who responded
562
  responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))
563
 
564
  self.update_visual_state({
@@ -569,54 +649,60 @@ Format your response as:
569
  "showBubbles": responded_speakers
570
  })
571
 
572
- log_discussion_event('phase', content="โœ… Discussion Complete")
573
-
574
- return consensus_result # Return the actual analysis, including disagreements
575
-
576
- # Global state for the visual component
577
- current_roundtable_state = {
578
- "participants": [],
579
- "messages": [],
580
- "currentSpeaker": None,
581
- "thinking": [],
582
- "showBubbles": []
583
- }
584
-
585
- def update_roundtable_state(new_state):
586
- """Update the global roundtable state"""
587
- global current_roundtable_state
588
- current_roundtable_state.update(new_state)
589
- return json.dumps(current_roundtable_state)
590
-
591
- # Global variables for step-by-step control
592
- step_pause_queue = queue.Queue()
593
- step_continue_event = threading.Event()
594
-
595
- def run_consensus_discussion(question: str, discussion_rounds: int = 3,
596
- decision_protocol: str = "consensus", role_assignment: str = "balanced",
597
- topology: str = "full_mesh", moderator_model: str = "mistral",
598
- enable_step_by_step: bool = False):
599
- """Main function that returns both text log and updates visual state"""
600
-
601
- global discussion_log, final_answer, step_by_step_active, step_continue_event
602
- discussion_log = [] # Reset log
603
- final_answer = ""
604
- step_by_step_active = enable_step_by_step
605
- step_continue_event.clear()
606
-
607
- def visual_update_callback(state_update):
608
- """Callback to update visual state during discussion"""
609
- update_roundtable_state(state_update)
610
-
611
- engine = VisualConsensusEngine(moderator_model, visual_update_callback)
612
- result = engine.run_visual_consensus(
 
 
 
 
 
613
  question, discussion_rounds, decision_protocol,
614
- role_assignment, topology, moderator_model, enable_step_by_step
 
615
  )
616
 
617
- # Generate final answer summary
618
  available_models = [model for model, info in engine.models.items() if info['available']]
619
- final_answer = f"""## ๐ŸŽฏ Final Consensus Answer
620
 
621
  {result}
622
 
@@ -627,45 +713,23 @@ def run_consensus_discussion(question: str, discussion_rounds: int = 3,
627
  - **Protocol:** {decision_protocol.replace('_', ' ').title()}
628
  - **Participants:** {len(available_models)} AI models
629
  - **Roles:** {role_assignment.title()}
630
- - **Communication:** {topology.replace('_', ' ').title()}
631
- - **Rounds:** {discussion_rounds}
632
 
633
  *Generated by Consilium Visual AI Consensus Platform*"""
634
 
635
- step_by_step_active = False # Reset after discussion
636
-
637
- # Return ONLY status for the status field, not the full result
638
- status_text = "โœ… Discussion Complete - See results below"
639
- return status_text, json.dumps(current_roundtable_state), final_answer, format_discussion_log()
640
-
641
- def continue_step():
642
- """Function called by the Next Step button"""
643
- global step_continue_event
644
- step_continue_event.set()
645
- return "โœ… Continuing... Next AI will respond shortly"
646
-
647
- # Global variables for step-by-step control
648
- discussion_log = []
649
- final_answer = ""
650
- step_by_step_active = False
651
- current_step_data = {}
652
- step_callback = None
653
-
654
- def set_step_callback(callback):
655
- """Set the callback for step-by-step mode"""
656
- global step_callback
657
- step_callback = callback
658
-
659
- def wait_for_next_step():
660
- """Wait for user to click 'Next Step' button in step-by-step mode"""
661
- global step_by_step_active
662
- if step_by_step_active and step_callback:
663
- # Return control to UI - the next step button will continue
664
- return True
665
- return False
666
-
667
- def format_discussion_log():
668
- """Format the complete discussion log for display"""
669
  if not discussion_log:
670
  return "No discussion log available yet."
671
 
@@ -689,16 +753,46 @@ def format_discussion_log():
689
 
690
  return formatted_log
691
 
692
- def log_discussion_event(event_type: str, speaker: str = "", content: str = "", **kwargs):
693
- """Add an event to the discussion log"""
694
- global discussion_log
695
- discussion_log.append({
696
- 'type': event_type,
697
- 'speaker': speaker,
698
- 'content': content,
699
- 'timestamp': datetime.now().strftime('%H:%M:%S'),
700
- **kwargs
701
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
702
 
703
  # Create the hybrid interface
704
  with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.themes.Soft()) as demo:
@@ -714,10 +808,14 @@ with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.th
714
  - ๐ŸŒ **Communication Topologies** - Full mesh, star, ring patterns
715
  - ๐Ÿ—ณ๏ธ **Decision Protocols** - Consensus, voting, weighted, ranked choice
716
  - ๐Ÿ” **Web Search Integration** - Real-time information gathering
 
717
 
718
  **Perfect for:** Complex decisions, research analysis, creative brainstorming, problem-solving
719
  """)
720
 
 
 
 
721
  with gr.Tab("๐ŸŽญ Visual Consensus Discussion"):
722
  with gr.Row():
723
  with gr.Column(scale=1):
@@ -777,7 +875,13 @@ with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.th
777
  # The visual roundtable component
778
  roundtable = consilium_roundtable(
779
  label="๐ŸŽญ AI Consensus Roundtable",
780
- value=json.dumps(current_roundtable_state)
 
 
 
 
 
 
781
  )
782
 
783
  # Final answer section
@@ -796,28 +900,33 @@ with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.th
796
  # Event handlers
797
  def on_start_discussion(*args):
798
  # Start discussion immediately for both modes
799
- enable_step = args[-1] # Last argument is enable_step_by_step
 
800
 
801
  if enable_step:
802
  # Step-by-step mode: Start discussion in background thread
803
  def run_discussion():
804
- run_consensus_discussion(*args)
805
 
806
  discussion_thread = threading.Thread(target=run_discussion)
807
  discussion_thread.daemon = True
808
  discussion_thread.start()
809
 
 
 
 
810
  return (
811
  "๐ŸŽฌ Step-by-step mode: Discussion started - will pause after each AI response",
812
- json.dumps(current_roundtable_state),
813
  "*Discussion starting in step-by-step mode...*",
814
  "*Discussion log will appear here...*",
815
  gr.update(visible=True), # Show next step button
816
- gr.update(visible=True, value="Discussion running - will pause after first AI response") # Show step status
 
817
  )
818
  else:
819
  # Normal mode - start immediately and hide step controls
820
- result = run_consensus_discussion(*args)
821
  return result + (gr.update(visible=False), gr.update(visible=False))
822
 
823
  # Function to toggle step controls visibility
@@ -836,52 +945,122 @@ with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.th
836
 
837
  start_btn.click(
838
  on_start_discussion,
839
- inputs=[question_input, rounds_input, decision_protocol, role_assignment, topology, moderator_model, enable_clickthrough],
840
- outputs=[status_output, roundtable, final_answer_output, discussion_log_output, next_step_btn, step_status]
841
  )
842
 
843
  # Next step button handler
844
  next_step_btn.click(
845
- continue_step,
 
846
  outputs=[step_status]
847
  )
848
 
849
  # Auto-refresh the roundtable state every 2 seconds during discussion
850
- gr.Timer(2).tick(lambda: json.dumps(current_roundtable_state), outputs=[roundtable])
 
 
 
 
 
 
 
 
 
 
 
 
851
 
852
  with gr.Tab("๐Ÿ”ง Configuration & Setup"):
853
- def check_model_status():
854
- engine = VisualConsensusEngine()
855
- status_info = "## ๐Ÿ” Model Availability Status\n\n"
856
-
857
- for model_id, model_info in engine.models.items():
858
- if model_id == 'search':
859
- status = "โœ… Available (Built-in)"
860
- else:
861
- status = "โœ… Available" if model_info['available'] else "โŒ Not configured"
862
- status_info += f"**{model_info['name']}:** {status}\n\n"
863
-
864
- return status_info
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
865
 
866
- gr.Markdown(check_model_status())
 
 
 
 
 
 
 
 
867
 
868
  gr.Markdown("""
869
  ## ๐Ÿ› ๏ธ Setup Instructions
870
 
871
- ### Environment Variables Setup:
 
 
 
 
 
 
 
 
 
 
872
  ```bash
873
- MISTRAL_API_KEY=...
874
- SAMBANOVA_API_KEY=...
875
- MODERATOR_MODEL=mistral
 
876
  ```
877
 
878
  ### ๐Ÿฆ™ Sambanova Integration
879
- The platform now includes **3 Sambanova models**:
880
  - **DeepSeek-R1**: Advanced reasoning model
881
  - **Meta-Llama-3.1-8B**: Fast, efficient discussions
882
  - **QwQ-32B**: Large-scale consensus analysis
883
 
884
- All using Sambanova's ultra-fast inference infrastructure!
 
 
 
 
 
 
 
 
 
 
885
 
886
  ### ๐Ÿ”— MCP Integration
887
  Add to your Claude Desktop config:
@@ -896,19 +1075,11 @@ with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.th
896
  }
897
  ```
898
 
899
- ### ๐Ÿ“‹ Dependencies
900
- ```bash
901
- pip install gradio requests python-dotenv smolagents gradio-consilium-roundtable
902
- ```
903
-
904
- ### ๐Ÿค– Search Agent Configuration
905
- The Web Search Agent uses **smolagents** with:
906
- - **DuckDuckGoSearchTool**: Initial web searches
907
- - **VisitWebpageTool**: Deep dive into relevant pages
908
- - **FinalAnswerTool**: Synthesized comprehensive answers
909
- - **InferenceClientModel**: Powered by Hugging Face Inference API
910
-
911
- For optimal search results, ensure you have a stable internet connection.
912
  """)
913
 
914
  with gr.Tab("๐Ÿ“š Usage Examples"):
@@ -944,6 +1115,42 @@ with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.th
944
  - ๐ŸŽฏ **Center consensus** = Final decision reached
945
 
946
  **The roundtable updates in real-time as the discussion progresses!**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
947
  """)
948
 
949
  # Launch configuration
 
11
  from collections import Counter
12
  import threading
13
  import queue
14
+ import uuid
15
  from gradio_consilium_roundtable import consilium_roundtable
16
  from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, VisitWebpageTool, Tool
17
 
18
  # Load environment variables
19
  load_dotenv()
20
 
21
+ # API Configuration - These will be updated by UI if needed
22
  MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
23
  SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY")
24
+ HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
25
  MODERATOR_MODEL = os.getenv("MODERATOR_MODEL", "mistral")
26
 
27
+ # Session-based storage for isolated discussions
28
+ user_sessions: Dict[str, Dict] = {}
29
+
30
  class WikipediaTool(Tool):
31
  name = "wikipedia_search"
32
  description = "Search Wikipedia for comprehensive information on any topic"
 
51
 
52
  class WebSearchAgent:
53
  def __init__(self):
54
+ try:
55
+ # Use TinyLlama for faster inference
56
+ self.agent = CodeAgent(
57
+ tools=[
58
+ DuckDuckGoSearchTool(),
59
+ VisitWebpageTool(),
60
+ WikipediaTool(),
61
+ FinalAnswerTool()
62
+ ],
63
+ model=InferenceClientModel(model_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0"),
64
+ max_steps=3,
65
+ verbosity_level=0
66
+ )
67
+ except Exception as e:
68
+ print(f"Warning: Could not initialize search agent: {e}")
69
+ self.agent = None
70
 
71
  def search(self, query: str, max_results: int = 5) -> str:
72
  """Use the CodeAgent to perform comprehensive web search and analysis"""
73
+ if not self.agent:
74
+ return f"๐Ÿ” **Web Search for:** {query}\n\nSearch agent not available. Please check dependencies."
75
+
76
  try:
77
+ # Simplified prompt for TinyLlama
78
+ agent_prompt = f"Search for information about: {query}"
79
+
 
 
 
 
 
 
 
 
 
 
 
80
  # Run the agent
81
  result = self.agent.run(agent_prompt)
82
 
 
84
  if result:
85
  return f"๐Ÿ” **Web Research Results for:** {query}\n\n{result}"
86
  else:
87
+ return f"๐Ÿ” **Web Search for:** {query}\n\nNo results found."
88
 
89
  except Exception as e:
90
  # Fallback to simple error message
91
+ return f"๐Ÿ” **Web Search Error for:** {query}\n\nError: {str(e)}\n\nPlease try again or rephrase your query."
92
+
93
+ def get_session_id(request: gr.Request = None) -> str:
94
+ """Generate or retrieve session ID"""
95
+ if request and hasattr(request, 'session_hash'):
96
+ return request.session_hash
97
+ return str(uuid.uuid4())
98
+
99
+ def get_or_create_session_state(session_id: str) -> Dict:
100
+ """Get or create isolated session state"""
101
+ if session_id not in user_sessions:
102
+ user_sessions[session_id] = {
103
+ "roundtable_state": {
104
+ "participants": [],
105
+ "messages": [],
106
+ "currentSpeaker": None,
107
+ "thinking": [],
108
+ "showBubbles": []
109
+ },
110
+ "discussion_log": [],
111
+ "final_answer": "",
112
+ "step_by_step_active": False,
113
+ "step_continue_event": threading.Event(),
114
+ "api_keys": {
115
+ "mistral": None,
116
+ "sambanova": None,
117
+ "huggingface": None
118
+ }
119
+ }
120
+ return user_sessions[session_id]
121
+
122
+ def update_session_api_keys(mistral_key, sambanova_key, huggingface_key, session_id_state, request: gr.Request = None):
123
+ """Update API keys for THIS SESSION ONLY"""
124
+ session_id = get_session_id(request) if not session_id_state else session_id_state
125
+ session = get_or_create_session_state(session_id)
126
+
127
+ status_messages = []
128
+
129
+ # Update keys for THIS SESSION
130
+ if mistral_key.strip():
131
+ session["api_keys"]["mistral"] = mistral_key.strip()
132
+ status_messages.append("โœ… Mistral API key saved for this session")
133
+ elif MISTRAL_API_KEY: # Fall back to env var
134
+ session["api_keys"]["mistral"] = MISTRAL_API_KEY
135
+ status_messages.append("โœ… Using Mistral API key from environment")
136
+ else:
137
+ status_messages.append("โŒ No Mistral API key available")
138
+
139
+ if sambanova_key.strip():
140
+ session["api_keys"]["sambanova"] = sambanova_key.strip()
141
+ status_messages.append("โœ… SambaNova API key saved for this session")
142
+ elif SAMBANOVA_API_KEY:
143
+ session["api_keys"]["sambanova"] = SAMBANOVA_API_KEY
144
+ status_messages.append("โœ… Using SambaNova API key from environment")
145
+ else:
146
+ status_messages.append("โŒ No SambaNova API key available")
147
+
148
+ if huggingface_key.strip():
149
+ session["api_keys"]["huggingface"] = huggingface_key.strip()
150
+ status_messages.append("โœ… Hugging Face token saved for this session")
151
+ # Update environment for search agent
152
+ os.environ["HUGGINGFACE_API_TOKEN"] = huggingface_key.strip()
153
+ elif HUGGINGFACE_API_TOKEN:
154
+ session["api_keys"]["huggingface"] = HUGGINGFACE_API_TOKEN
155
+ status_messages.append("โœ… Using Hugging Face token from environment")
156
+ else:
157
+ status_messages.append("โŒ No Hugging Face token available")
158
+
159
+ return " | ".join(status_messages), session_id
160
 
161
  class VisualConsensusEngine:
162
+ def __init__(self, moderator_model: str = None, update_callback=None, session_id: str = None):
163
  self.moderator_model = moderator_model or MODERATOR_MODEL
164
  self.search_agent = WebSearchAgent()
165
+ self.update_callback = update_callback
166
+ self.session_id = session_id
167
+
168
+ # Get session-specific keys or fall back to global
169
+ session = get_or_create_session_state(session_id) if session_id else {"api_keys": {}}
170
+ session_keys = session.get("api_keys", {})
171
+
172
+ mistral_key = session_keys.get("mistral") or MISTRAL_API_KEY
173
+ sambanova_key = session_keys.get("sambanova") or SAMBANOVA_API_KEY
174
+ hf_token = session_keys.get("huggingface") or HUGGINGFACE_API_TOKEN
175
 
176
  self.models = {
177
  'mistral': {
178
  'name': 'Mistral Large',
179
+ 'api_key': mistral_key,
180
+ 'available': bool(mistral_key)
181
  },
182
  'sambanova_deepseek': {
183
  'name': 'DeepSeek-R1',
184
+ 'api_key': sambanova_key,
185
+ 'available': bool(sambanova_key)
186
  },
187
  'sambanova_llama': {
188
  'name': 'Meta-Llama-3.1-8B',
189
+ 'api_key': sambanova_key,
190
+ 'available': bool(sambanova_key)
191
  },
192
  'sambanova_qwq': {
193
  'name': 'QwQ-32B',
194
+ 'api_key': sambanova_key,
195
+ 'available': bool(sambanova_key)
196
  },
197
  'search': {
198
  'name': 'Web Search Agent',
 
201
  }
202
  }
203
 
204
+ # Store session keys for API calls
205
+ self.session_keys = {
206
+ 'mistral': mistral_key,
207
+ 'sambanova': sambanova_key,
208
+ 'huggingface': hf_token
209
+ }
210
+
211
  # Role definitions
212
  self.roles = {
213
  'standard': "You are participating in a collaborative AI discussion. Provide thoughtful, balanced analysis.",
 
219
  }
220
 
221
  def update_visual_state(self, state_update: Dict[str, Any]):
222
+ """Update the visual roundtable state for this session"""
223
  if self.update_callback:
224
  self.update_callback(state_update)
225
 
226
  def call_model(self, model: str, prompt: str, context: str = "") -> Optional[str]:
227
+ """Generic model calling function using session-specific keys"""
228
  if model == 'search':
229
  search_query = self._extract_search_query(prompt)
230
  return self.search_agent.search(search_query)
 
257
  return prompt[:100]
258
 
259
  def _call_sambanova(self, model: str, prompt: str) -> Optional[str]:
260
+ api_key = self.session_keys.get('sambanova')
261
+ if not api_key:
262
  return None
263
 
264
  try:
 
266
 
267
  client = OpenAI(
268
  base_url="https://api.sambanova.ai/v1",
269
+ api_key=api_key
270
  )
271
 
272
  model_mapping = {
 
293
  return None
294
 
295
  def _call_mistral(self, prompt: str) -> Optional[str]:
296
+ api_key = self.session_keys.get('mistral')
297
+ if not api_key:
298
  return None
299
 
300
  try:
 
302
 
303
  client = OpenAI(
304
  base_url="https://api.mistral.ai/v1",
305
+ api_key=api_key
306
  )
307
 
308
  completion = client.chat.completions.create(
 
352
  pass
353
  return 5.0
354
 
355
+ def run_visual_consensus_session(self, question: str, discussion_rounds: int = 3,
356
+ decision_protocol: str = "consensus", role_assignment: str = "balanced",
357
+ topology: str = "full_mesh", moderator_model: str = "mistral",
358
+ enable_step_by_step: bool = False, log_function=None):
359
+ """Run consensus with session-isolated visual updates"""
360
 
361
  available_models = [model for model, info in self.models.items() if info['available']]
362
  if not available_models:
 
365
  model_roles = self.assign_roles(available_models, role_assignment)
366
  participant_names = [self.models[model]['name'] for model in available_models]
367
 
368
+ # Use session-specific logging
369
+ def log_event(event_type: str, speaker: str = "", content: str = "", **kwargs):
370
+ if log_function:
371
+ log_function(event_type, speaker, content, **kwargs)
372
+
373
  # Log the start
374
+ log_event('phase', content=f"๐Ÿš€ Starting Discussion: {question}")
375
+ log_event('phase', content=f"๐Ÿ“Š Configuration: {len(available_models)} models, {decision_protocol} protocol, {role_assignment} roles")
376
 
377
  # Initialize visual state
378
  self.update_visual_state({
 
386
  all_messages = []
387
 
388
  # Phase 1: Initial responses
389
+ log_event('phase', content="๐Ÿ“ Phase 1: Initial Responses")
390
 
391
  for model in available_models:
392
  # Log and set thinking state
393
+ log_event('thinking', speaker=self.models[model]['name'])
394
  self.update_visual_state({
395
  "participants": participant_names,
396
  "messages": all_messages,
 
398
  "thinking": [self.models[model]['name']]
399
  })
400
 
 
401
  if not enable_step_by_step:
402
  time.sleep(1)
403
 
 
417
  4. END YOUR RESPONSE WITH: "Confidence: X/10" where X is your confidence level"""
418
 
419
  # Log and set speaking state
420
+ log_event('speaking', speaker=self.models[model]['name'])
421
  self.update_visual_state({
422
  "participants": participant_names,
423
  "messages": all_messages,
 
425
  "thinking": []
426
  })
427
 
 
428
  if not enable_step_by_step:
429
  time.sleep(2)
430
 
 
434
  confidence = self._extract_confidence(response)
435
  message = {
436
  "speaker": self.models[model]['name'],
437
+ "text": response,
438
  "confidence": confidence,
439
  "role": role
440
  }
441
  all_messages.append(message)
442
 
443
  # Log the full response
444
+ log_event('message',
445
+ speaker=self.models[model]['name'],
446
+ content=response,
447
+ role=role,
448
+ confidence=confidence)
449
 
450
+ # Update with new message
451
  responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))
452
 
453
  self.update_visual_state({
 
455
  "messages": all_messages,
456
  "currentSpeaker": None,
457
  "thinking": [],
458
+ "showBubbles": responded_speakers
459
  })
460
 
 
461
  if enable_step_by_step:
462
+ session = get_or_create_session_state(self.session_id)
463
+ session["step_continue_event"].clear()
464
+ session["step_continue_event"].wait()
465
  else:
466
  time.sleep(0.5)
467
 
468
  # Phase 2: Discussion rounds
469
  if discussion_rounds > 0:
470
+ log_event('phase', content=f"๐Ÿ’ฌ Phase 2: Discussion Rounds ({discussion_rounds} rounds)")
471
 
472
  for round_num in range(discussion_rounds):
473
+ log_event('phase', content=f"๐Ÿ”„ Discussion Round {round_num + 1}")
474
 
475
  for model in available_models:
476
+ log_event('thinking', speaker=self.models[model]['name'])
 
477
  self.update_visual_state({
478
  "participants": participant_names,
479
  "messages": all_messages,
 
481
  "thinking": [self.models[model]['name']]
482
  })
483
 
 
484
  if not enable_step_by_step:
485
  time.sleep(1)
486
 
 
500
  Please provide your updated analysis considering the discussion so far.
501
  END WITH: "Confidence: X/10" """
502
 
503
+ log_event('speaking', speaker=self.models[model]['name'])
 
504
  self.update_visual_state({
505
  "participants": participant_names,
506
  "messages": all_messages,
 
508
  "thinking": []
509
  })
510
 
 
511
  if not enable_step_by_step:
512
  time.sleep(2)
513
 
 
517
  confidence = self._extract_confidence(response)
518
  message = {
519
  "speaker": self.models[model]['name'],
520
+ "text": f"Round {round_num + 1}: {response}",
521
  "confidence": confidence,
522
  "role": model_roles[model]
523
  }
524
  all_messages.append(message)
525
 
526
+ log_event('message',
527
+ speaker=self.models[model]['name'],
528
+ content=f"Round {round_num + 1}: {response}",
529
+ role=model_roles[model],
530
+ confidence=confidence)
 
531
 
 
532
  responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))
533
 
534
  self.update_visual_state({
 
536
  "messages": all_messages,
537
  "currentSpeaker": None,
538
  "thinking": [],
539
+ "showBubbles": responded_speakers
540
  })
541
 
 
542
  if enable_step_by_step:
543
+ session = get_or_create_session_state(self.session_id)
544
+ session["step_continue_event"].clear()
545
+ session["step_continue_event"].wait()
546
  else:
547
  time.sleep(1)
548
 
549
+ # Phase 3: Final consensus
550
+ log_event('phase', content=f"๐ŸŽฏ Phase 3: Final Consensus ({decision_protocol})")
551
+ log_event('thinking', speaker="All participants", content="Building consensus...")
552
 
553
  self.update_visual_state({
554
  "participants": participant_names,
555
  "messages": all_messages,
556
  "currentSpeaker": None,
557
+ "thinking": participant_names
558
  })
559
 
 
560
  if not enable_step_by_step:
561
  time.sleep(2)
562
 
563
+ # Generate consensus
564
  moderator = self.moderator_model if self.models[self.moderator_model]['available'] else available_models[0]
565
 
566
+ # Collect responses from session log
567
+ session = get_or_create_session_state(self.session_id)
568
  all_responses = ""
569
  confidence_scores = []
570
+ for entry in session["discussion_log"]:
571
  if entry['type'] == 'message' and entry['speaker'] != 'Consilium':
572
  all_responses += f"\n**{entry['speaker']}**: {entry['content']}\n"
573
  if 'confidence' in entry:
574
  confidence_scores.append(entry['confidence'])
575
 
 
576
  avg_confidence = sum(confidence_scores) / len(confidence_scores) if confidence_scores else 5.0
577
+ consensus_threshold = 7.0
578
 
579
  consensus_prompt = f"""You are synthesizing the final result from this AI discussion.
580
 
 
600
 
601
  **AREAS OF DISAGREEMENT:** [If any - explain the key points of contention]"""
602
 
603
+ log_event('speaking', speaker="Consilium", content="Analyzing consensus and synthesizing final answer...")
604
  self.update_visual_state({
605
  "participants": participant_names,
606
  "messages": all_messages,
 
608
  "thinking": []
609
  })
610
 
 
611
  consensus_result = self.call_model(moderator, consensus_prompt)
612
 
613
  if not consensus_result:
 
617
 
618
  **AREAS OF DISAGREEMENT:** Analysis could not be completed due to technical issues."""
619
 
 
620
  consensus_reached = "CONSENSUS STATUS: Reached" in consensus_result or avg_confidence >= consensus_threshold
621
 
 
622
  if consensus_reached:
623
  visual_summary = "โœ… Consensus reached!"
624
  elif "Partial" in consensus_result:
 
628
 
629
  final_message = {
630
  "speaker": "Consilium",
631
+ "text": f"{visual_summary} {consensus_result}",
632
  "confidence": avg_confidence,
633
  "role": "consensus"
634
  }
635
  all_messages.append(final_message)
636
 
637
+ log_event('message',
638
+ speaker="Consilium",
639
+ content=consensus_result,
640
+ confidence=avg_confidence)
641
 
 
642
  responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker")))
643
 
644
  self.update_visual_state({
 
649
  "showBubbles": responded_speakers
650
  })
651
 
652
+ log_event('phase', content="โœ… Discussion Complete")
653
+
654
+ return consensus_result
655
+
656
def update_session_roundtable_state(session_id: str, new_state: Dict):
    """Merge *new_state* into the given session's roundtable state.

    Returns the full updated state serialized as a JSON string — the shape
    the roundtable component consumes.
    """
    state = get_or_create_session_state(session_id)["roundtable_state"]
    state.update(new_state)
    return json.dumps(state)
661
+
662
+ def run_consensus_discussion_session(question: str, discussion_rounds: int = 3,
663
+ decision_protocol: str = "consensus", role_assignment: str = "balanced",
664
+ topology: str = "full_mesh", moderator_model: str = "mistral",
665
+ enable_step_by_step: bool = False, session_id_state: str = None,
666
+ request: gr.Request = None):
667
+ """Session-isolated consensus discussion"""
668
+
669
+ # Get unique session
670
+ session_id = get_session_id(request) if not session_id_state else session_id_state
671
+ session = get_or_create_session_state(session_id)
672
+
673
+ # Reset session state for new discussion
674
+ session["discussion_log"] = []
675
+ session["final_answer"] = ""
676
+ session["step_by_step_active"] = enable_step_by_step
677
+ session["step_continue_event"].clear()
678
+
679
+ def session_visual_update_callback(state_update):
680
+ """Session-specific visual update callback"""
681
+ update_session_roundtable_state(session_id, state_update)
682
+
683
+ def session_log_event(event_type: str, speaker: str = "", content: str = "", **kwargs):
684
+ """Add event to THIS session's log only"""
685
+ session["discussion_log"].append({
686
+ 'type': event_type,
687
+ 'speaker': speaker,
688
+ 'content': content,
689
+ 'timestamp': datetime.now().strftime('%H:%M:%S'),
690
+ **kwargs
691
+ })
692
+
693
+ # Create engine with session-specific callback
694
+ engine = VisualConsensusEngine(moderator_model, session_visual_update_callback, session_id)
695
+
696
+ # Run consensus with session-specific logging
697
+ result = engine.run_visual_consensus_session(
698
  question, discussion_rounds, decision_protocol,
699
+ role_assignment, topology, moderator_model,
700
+ enable_step_by_step, session_log_event
701
  )
702
 
703
+ # Generate session-specific final answer
704
  available_models = [model for model, info in engine.models.items() if info['available']]
705
+ session["final_answer"] = f"""## ๐ŸŽฏ Final Consensus Answer
706
 
707
  {result}
708
 
 
713
  - **Protocol:** {decision_protocol.replace('_', ' ').title()}
714
  - **Participants:** {len(available_models)} AI models
715
  - **Roles:** {role_assignment.title()}
716
+ - **Session ID:** {session_id[:8]}...
 
717
 
718
  *Generated by Consilium Visual AI Consensus Platform*"""
719
 
720
+ session["step_by_step_active"] = False
721
+
722
+ # Format session-specific discussion log
723
+ formatted_log = format_session_discussion_log(session["discussion_log"])
724
+
725
+ return ("โœ… Discussion Complete - See results below",
726
+ json.dumps(session["roundtable_state"]),
727
+ session["final_answer"],
728
+ formatted_log,
729
+ session_id)
730
+
731
+ def format_session_discussion_log(discussion_log: list) -> str:
732
+ """Format discussion log for specific session"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
733
  if not discussion_log:
734
  return "No discussion log available yet."
735
 
 
753
 
754
  return formatted_log
755
 
756
def continue_step_session(session_id_state: str):
    """Resume a paused step-by-step discussion for the given session.

    Called by the "Next Step" button: sets the session's continue event so the
    background discussion thread wakes up and the next AI responds.
    Returns a status string for the UI.
    """
    if not session_id_state or session_id_state not in user_sessions:
        return "❌ Session not found"
    user_sessions[session_id_state]["step_continue_event"].set()
    return "✅ Continuing... Next AI will respond shortly"
763
+
764
def check_model_status_session(session_id_state: str = None, request: gr.Request = None):
    """Build a markdown report of model availability for a specific session.

    Resolves the caller's session (an explicit session id wins over deriving
    one from the Gradio request), then reports which models are usable —
    per-session API keys take precedence over environment variables.

    Args:
        session_id_state: Session id carried in the hidden gr.State, if any.
        request: Gradio request used to derive a session id as a fallback.

    Returns:
        Markdown string listing each model and its availability.
    """
    session_id = get_session_id(request) if not session_id_state else session_id_state
    session = get_or_create_session_state(session_id)
    session_keys = session.get("api_keys", {})

    # Session-specific keys take precedence; fall back to env vars.
    # (Fix: the previous version also resolved a Hugging Face token here but
    # never used it — the Web Search Agent is reported as built-in below.)
    mistral_key = session_keys.get("mistral") or MISTRAL_API_KEY
    sambanova_key = session_keys.get("sambanova") or SAMBANOVA_API_KEY

    status_info = "## 🔍 Model Availability Status\n\n"

    # Display name -> the key gating that model (True = always available).
    models = {
        'Mistral Large': mistral_key,
        'DeepSeek-R1': sambanova_key,
        'Meta-Llama-3.1-8B': sambanova_key,
        'QwQ-32B': sambanova_key,
        'Web Search Agent': True
    }

    for model_name, available in models.items():
        if model_name == 'Web Search Agent':
            status = "✅ Available (Built-in)"
        elif available:
            # Show only a short prefix so the full secret is never rendered.
            status = f"✅ Available (Key: {available[:8]}...)"
        else:
            status = "❌ Not configured"
        status_info += f"**{model_name}:** {status}\n\n"

    return status_info
796
 
797
  # Create the hybrid interface
798
  with gr.Blocks(title="๐ŸŽญ Consilium: Visual AI Consensus Platform", theme=gr.themes.Soft()) as demo:
 
808
  - ๐ŸŒ **Communication Topologies** - Full mesh, star, ring patterns
809
  - ๐Ÿ—ณ๏ธ **Decision Protocols** - Consensus, voting, weighted, ranked choice
810
  - ๐Ÿ” **Web Search Integration** - Real-time information gathering
811
+ - ๐Ÿ”’ **Session Isolation** - Each user gets their own private discussion space
812
 
813
  **Perfect for:** Complex decisions, research analysis, creative brainstorming, problem-solving
814
  """)
815
 
816
+ # Hidden session state component
817
+ session_state = gr.State()
818
+
819
  with gr.Tab("๐ŸŽญ Visual Consensus Discussion"):
820
  with gr.Row():
821
  with gr.Column(scale=1):
 
875
  # The visual roundtable component
876
  roundtable = consilium_roundtable(
877
  label="๐ŸŽญ AI Consensus Roundtable",
878
+ value=json.dumps({
879
+ "participants": [],
880
+ "messages": [],
881
+ "currentSpeaker": None,
882
+ "thinking": [],
883
+ "showBubbles": []
884
+ })
885
  )
886
 
887
  # Final answer section
 
900
  # Event handlers
901
    def on_start_discussion(*args):
        """Start-button handler: launch a consensus discussion.

        *args mirrors the click ``inputs`` list in order:
        (question, rounds, decision_protocol, role_assignment, topology,
        moderator_model, enable_clickthrough, session_state).
        """
        # Start discussion immediately for both modes
        enable_step = args[-2]  # enable_clickthrough checkbox -> step-by-step mode
        request = args[-1]  # NOTE(review): args[-1] is the session_state value, NOT a gr.Request — a *args handler receives no request object; get_session_id(request) below gets this value. Confirm get_session_id tolerates a plain session id / None.
        
        if enable_step:
            # Step-by-step mode: Start discussion in background thread
            def run_discussion():
                # session_state rides along as the last positional arg (session_id_state)
                run_consensus_discussion_session(*args)
            
            discussion_thread = threading.Thread(target=run_discussion)
            discussion_thread.daemon = True  # don't keep the process alive for a running discussion
            discussion_thread.start()
            
            # Get session ID for this user
            session_id = get_session_id(request)
            
            # Return immediately; the Timer-driven refresh streams roundtable state.
            return (
                "🎬 Step-by-step mode: Discussion started - will pause after each AI response",
                json.dumps(get_or_create_session_state(session_id)["roundtable_state"]),
                "*Discussion starting in step-by-step mode...*",
                "*Discussion log will appear here...*",
                gr.update(visible=True),  # Show next step button
                gr.update(visible=True, value="Discussion running - will pause after first AI response"),  # Show step status
                session_id
            )
        else:
            # Normal mode - start immediately and hide step controls
            result = run_consensus_discussion_session(*args)
            # result is (status, roundtable_json, final_answer, log, session_id);
            # append two updates hiding the step controls to fill all 7 outputs.
            return result + (gr.update(visible=False), gr.update(visible=False))
931
 
932
  # Function to toggle step controls visibility
 
945
 
946
  start_btn.click(
947
  on_start_discussion,
948
+ inputs=[question_input, rounds_input, decision_protocol, role_assignment, topology, moderator_model, enable_clickthrough, session_state],
949
+ outputs=[status_output, roundtable, final_answer_output, discussion_log_output, next_step_btn, step_status, session_state]
950
  )
951
 
952
  # Next step button handler
953
  next_step_btn.click(
954
+ continue_step_session,
955
+ inputs=[session_state],
956
  outputs=[step_status]
957
  )
958
 
959
  # Auto-refresh the roundtable state every 2 seconds during discussion
960
+ def refresh_roundtable(session_id_state, request: gr.Request = None):
961
+ session_id = get_session_id(request) if not session_id_state else session_id_state
962
+ if session_id in user_sessions:
963
+ return json.dumps(user_sessions[session_id]["roundtable_state"])
964
+ return json.dumps({
965
+ "participants": [],
966
+ "messages": [],
967
+ "currentSpeaker": None,
968
+ "thinking": [],
969
+ "showBubbles": []
970
+ })
971
+
972
+ gr.Timer(2).tick(refresh_roundtable, inputs=[session_state], outputs=[roundtable])
973
 
974
  with gr.Tab("๐Ÿ”ง Configuration & Setup"):
975
+ gr.Markdown("## ๐Ÿ”‘ API Keys Configuration")
976
+ gr.Markdown("*Enter your API keys below OR set them as environment variables*")
977
+ gr.Markdown("**๐Ÿ”’ Privacy:** Your API keys are stored only for your session and are not shared with other users.")
978
+
979
+ with gr.Row():
980
+ with gr.Column():
981
+ mistral_key_input = gr.Textbox(
982
+ label="Mistral API Key",
983
+ placeholder="Enter your Mistral API key...",
984
+ type="password",
985
+ info="Required for Mistral Large model"
986
+ )
987
+ sambanova_key_input = gr.Textbox(
988
+ label="SambaNova API Key",
989
+ placeholder="Enter your SambaNova API key...",
990
+ type="password",
991
+ info="Required for DeepSeek, Llama, and QwQ models"
992
+ )
993
+ huggingface_key_input = gr.Textbox(
994
+ label="Hugging Face API Token",
995
+ placeholder="Enter your Hugging Face API token...",
996
+ type="password",
997
+ info="Required for Web Search Agent (TinyLlama)"
998
+ )
999
+
1000
+ with gr.Column():
1001
+ # Add a button to save/update keys
1002
+ save_keys_btn = gr.Button("๐Ÿ’พ Save API Keys", variant="secondary")
1003
+ keys_status = gr.Textbox(
1004
+ label="Keys Status",
1005
+ value="No API keys configured - using environment variables if available",
1006
+ interactive=False
1007
+ )
1008
+
1009
+ # Connect the save button
1010
+ save_keys_btn.click(
1011
+ update_session_api_keys,
1012
+ inputs=[mistral_key_input, sambanova_key_input, huggingface_key_input, session_state],
1013
+ outputs=[keys_status, session_state]
1014
+ )
1015
 
1016
+ model_status_display = gr.Markdown(check_model_status_session())
1017
+
1018
+ # Add refresh button for model status
1019
+ refresh_status_btn = gr.Button("๐Ÿ”„ Refresh Model Status")
1020
+ refresh_status_btn.click(
1021
+ check_model_status_session,
1022
+ inputs=[session_state],
1023
+ outputs=[model_status_display]
1024
+ )
1025
 
1026
  gr.Markdown("""
1027
  ## ๐Ÿ› ๏ธ Setup Instructions
1028
 
1029
+ ### ๐Ÿš€ Quick Start (Recommended)
1030
+ 1. **Enter API keys above** (they'll be used only for your session)
1031
+ 2. **Click "Save API Keys"**
1032
+ 3. **Start a discussion!**
1033
+
1034
+ ### ๐Ÿ”‘ Get API Keys:
1035
+ - **Mistral:** [console.mistral.ai](https://console.mistral.ai)
1036
+ - **SambaNova:** [cloud.sambanova.ai](https://cloud.sambanova.ai)
1037
+ - **Hugging Face:** [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)
1038
+
1039
+ ### ๐ŸŒ Alternative: Environment Variables
1040
  ```bash
1041
+ export MISTRAL_API_KEY=your_key_here
1042
+ export SAMBANOVA_API_KEY=your_key_here
1043
+ export HUGGINGFACE_API_TOKEN=your_token_here
1044
+ export MODERATOR_MODEL=mistral
1045
  ```
1046
 
1047
  ### ๐Ÿฆ™ Sambanova Integration
1048
+ The platform includes **3 Sambanova models**:
1049
  - **DeepSeek-R1**: Advanced reasoning model
1050
  - **Meta-Llama-3.1-8B**: Fast, efficient discussions
1051
  - **QwQ-32B**: Large-scale consensus analysis
1052
 
1053
+ ### ๐Ÿ” Web Search Agent
1054
+ Built-in agent using **smolagents** with:
1055
+ - **DuckDuckGoSearchTool**: Web searches
1056
+ - **VisitWebpageTool**: Deep content analysis
1057
+ - **WikipediaTool**: Comprehensive research
1058
+ - **TinyLlama**: Fast inference for search synthesis
1059
+
1060
+ ### ๐Ÿ“‹ Dependencies
1061
+ ```bash
1062
+ pip install gradio requests python-dotenv smolagents gradio-consilium-roundtable wikipedia openai
1063
+ ```
1064
 
1065
  ### ๐Ÿ”— MCP Integration
1066
  Add to your Claude Desktop config:
 
1075
  }
1076
  ```
1077
 
1078
+ ### ๐Ÿ”’ Privacy & Security
1079
+ - **Session Isolation**: Each user gets their own private discussion space
1080
+ - **API Key Protection**: Keys are stored only in your browser session
1081
+ - **No Global State**: Your discussions are not visible to other users
1082
+ - **Secure Communication**: All API calls use HTTPS encryption
 
 
 
 
 
 
 
 
1083
  """)
1084
 
1085
  with gr.Tab("๐Ÿ“š Usage Examples"):
 
1115
  - ๐ŸŽฏ **Center consensus** = Final decision reached
1116
 
1117
  **The roundtable updates in real-time as the discussion progresses!**
1118
+
1119
+ ## ๐ŸŽฎ Role Assignments Explained
1120
+
1121
+ ### ๐ŸŽญ Balanced (Recommended)
1122
+ - **Devil's Advocate**: Challenges assumptions
1123
+ - **Fact Checker**: Verifies claims and accuracy
1124
+ - **Synthesizer**: Finds common ground
1125
+ - **Standard**: Provides balanced analysis
1126
+
1127
+ ### ๐ŸŽ“ Specialized
1128
+ - **Domain Expert**: Technical expertise
1129
+ - **Fact Checker**: Accuracy verification
1130
+ - **Creative Thinker**: Innovative solutions
1131
+ - **Synthesizer**: Bridge building
1132
+
1133
+ ### โš”๏ธ Adversarial
1134
+ - **Double Devil's Advocate**: Maximum challenge
1135
+ - **Standard**: Balanced counter-perspective
1136
+
1137
+ ## ๐Ÿ—ณ๏ธ Decision Protocols
1138
+
1139
+ - **Consensus**: Seek agreement among all participants
1140
+ - **Majority Voting**: Most popular position wins
1141
+ - **Weighted Voting**: Higher confidence scores matter more
1142
+ - **Ranked Choice**: Preference-based selection
1143
+ - **Unanimity**: All must agree completely
1144
+
1145
+ ## ๐Ÿ”’ Session Isolation
1146
+
1147
+ **Each user gets their own private space:**
1148
+ - โœ… Your discussions are private to you
1149
+ - โœ… Your API keys are not shared
1150
+ - โœ… Your conversation history is isolated
1151
+ - โœ… Multiple users can use the platform simultaneously
1152
+
1153
+ **Perfect for teams, research groups, and individual use!**
1154
  """)
1155
 
1156
  # Launch configuration