azettl committed on
Commit 730fc8a · 1 Parent(s): ce0bf87

add details about research

Files changed (1)
  1. app.py +305 -124
app.py CHANGED
@@ -177,105 +177,6 @@ class VisualConsensusEngine:
         """Update the visual roundtable state for this session"""
         if self.update_callback:
             self.update_callback(state_update)
-
-    def show_research_activity(self, speaker: str, function: str, query: str):
-        """Show research happening in the UI with Research Agent activation"""
-        # Get current state properly
-        session = get_or_create_session_state(self.session_id)
-        current_state = session["roundtable_state"]
-        all_messages = list(current_state.get("messages", []))  # Make a copy
-        participants = current_state.get("participants", [])
-
-        # PRESERVE existing bubbles throughout research
-        existing_bubbles = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
-
-        # Get function display name
-        function_display = {
-            'search_web': 'Web Search',
-            'search_wikipedia': 'Wikipedia',
-            'search_academic': 'Academic Papers',
-            'search_technology_trends': 'Technology Trends',
-            'search_financial_data': 'Financial Data',
-            'multi_source_research': 'Multi-Source Research'
-        }.get(function, function.replace('_', ' ').title())
-
-        # Step 1: Show expert requesting research
-        request_message = {
-            "speaker": speaker,
-            "text": f"🔍 **Research Request**: {function_display}\n📝 Query: \"{query}\"",
-            "type": "research_request"
-        }
-        all_messages.append(request_message)
-
-        self.update_visual_state({
-            "participants": participants,
-            "messages": all_messages,
-            "currentSpeaker": speaker,
-            "thinking": [],
-            "showBubbles": existing_bubbles + [speaker]
-        })
-        time.sleep(1.5)
-
-        # Step 2: Research Agent starts thinking
-        self.update_visual_state({
-            "participants": participants,
-            "messages": all_messages,
-            "currentSpeaker": None,
-            "thinking": ["Research Agent"],
-            "showBubbles": existing_bubbles + [speaker, "Research Agent"]
-        })
-        time.sleep(2)
-
-        # Step 3: Research Agent working - show detailed activity
-        working_message = {
-            "speaker": "Research Agent",
-            "text": f"🔍 **Conducting Research**: {function_display}\n📊 Analyzing: \"{query}\"\n⏳ Please wait while I gather information...",
-            "type": "research_activity"
-        }
-        all_messages.append(working_message)
-
-        self.update_visual_state({
-            "participants": participants,
-            "messages": all_messages,
-            "currentSpeaker": "Research Agent",
-            "thinking": [],
-            "showBubbles": existing_bubbles + [speaker, "Research Agent"]
-        })
-        time.sleep(3)  # Longer pause to see research happening
-
-        # Step 4: Research completion notification
-        completion_message = {
-            "speaker": "Research Agent",
-            "text": f"✅ **Research Complete**: {function_display}\n📋 Results ready for analysis",
-            "type": "research_complete"
-        }
-        all_messages.append(completion_message)
-
-        self.update_visual_state({
-            "participants": participants,
-            "messages": all_messages,
-            "currentSpeaker": "Research Agent",
-            "thinking": [],
-            "showBubbles": existing_bubbles + [speaker, "Research Agent"]
-        })
-        time.sleep(1.5)
-
-        # Step 5: Expert processing results
-        processing_message = {
-            "speaker": speaker,
-            "text": f"📊 **Processing Research Results**\n🧠 Integrating {function_display} findings into analysis...",
-            "type": "research_processing"
-        }
-        all_messages.append(processing_message)
-
-        self.update_visual_state({
-            "participants": participants,
-            "messages": all_messages,
-            "currentSpeaker": speaker,
-            "thinking": [],
-            "showBubbles": existing_bubbles + [speaker, "Research Agent"]  # Keep Research Agent visible longer
-        })
-        time.sleep(2)
 
     def log_research_activity(self, speaker: str, function: str, query: str, result: str, log_function=None):
         """Log research activity to the discussion log"""
@@ -342,12 +243,33 @@ class VisualConsensusEngine:
                 function_name = tool_call.function.name
                 arguments = json.loads(tool_call.function.arguments)
 
-                # Show research activity in UI
                 query_param = arguments.get("query") or arguments.get("topic") or arguments.get("technology") or arguments.get("company")
                 if query_param:
-                    self.show_research_activity(calling_model_name, function_name, query_param)
-
-                # Execute the enhanced research functions
+                    session = get_or_create_session_state(self.session_id)
+                    current_state = session["roundtable_state"]
+                    all_messages = list(current_state.get("messages", []))
+
+                    # Add request message to the CALLING MODEL (Mistral)
+                    request_message = {
+                        "speaker": calling_model_name,
+                        "text": f"🔍 **Research Request**: {function_name.replace('_', ' ').title()}\n📝 Query: \"{query_param}\"\n⏳ Waiting for research results...",
+                        "type": "research_request"
+                    }
+                    all_messages.append(request_message)
+
+                    existing_bubbles = list(current_state.get("showBubbles", []))
+                    if calling_model_name not in existing_bubbles:
+                        existing_bubbles.append(calling_model_name)
+
+                    self.update_visual_state({
+                        "participants": current_state.get("participants", []),
+                        "messages": all_messages,
+                        "currentSpeaker": calling_model_name,
+                        "thinking": [],
+                        "showBubbles": existing_bubbles
+                    })
+                    time.sleep(1)
+
                 result = self._execute_research_function(function_name, arguments)
 
                 # Ensure result is a string
@@ -365,9 +287,11 @@ class VisualConsensusEngine:
                         **kwargs
                     })
 
+                # Get query parameter for logging
+                query_param = arguments.get("query") or arguments.get("topic") or arguments.get("technology") or arguments.get("company")
                 if query_param and result:
                     self.log_research_activity(calling_model_name, function_name, query_param, result, session_log_function)
-
+
                 # Add function result to conversation
                 messages.append({
                     "role": "tool",
@@ -435,42 +359,298 @@ class VisualConsensusEngine:
            print(f"Error in follow-up completion for {calling_model}: {str(e)}")
            return message.content or "Analysis completed with research integration."
 
+
    def _execute_research_function(self, function_name: str, arguments: dict) -> str:
-        """Execute research function with enhanced capabilities"""
+        """Execute research function with REAL-TIME visual feedback and progress indicators"""
+
+        query_param = arguments.get("query") or arguments.get("topic") or arguments.get("technology") or arguments.get("company")
+
+        # Phase 1: Show research STARTING
+        if query_param:
+            self.show_research_starting(function_name, query_param)
+
        try:
+            # Actually execute the research with detailed progress indicators
+            result = ""
+
            if function_name == "search_web":
+                self.update_research_progress("Initializing web search engines...")
                depth = arguments.get("depth", "standard")
-                return self.search_agent.search(arguments["query"], depth)
+
+                if depth == "deep":
+                    self.update_research_progress("Performing deep web search (multiple sources)...")
+                else:
+                    self.update_research_progress("Searching web databases...")
+
+                result = self.search_agent.search(arguments["query"], depth)
+                self.update_research_progress(f"Web search complete - found {len(result)} characters of data")
 
            elif function_name == "search_wikipedia":
-                return self.search_agent.search_wikipedia(arguments["topic"])
+                self.update_research_progress("Connecting to Wikipedia API...")
+                self.update_research_progress("Searching Wikipedia articles...")
+                result = self.search_agent.search_wikipedia(arguments["topic"])
+                self.update_research_progress(f"Wikipedia search complete - found {len(result)} characters")
 
            elif function_name == "search_academic":
                source = arguments.get("source", "both")
+
                if source == "arxiv":
-                    return self.search_agent.tools['arxiv'].search(arguments["query"])
+                    self.update_research_progress("Connecting to arXiv preprint server...")
+                    self.update_research_progress("Searching academic papers on arXiv...")
+                    result = self.search_agent.tools['arxiv'].search(arguments["query"])
+                    self.update_research_progress(f"arXiv search complete - found {len(result)} characters")
+
                elif source == "scholar":
-                    return self.search_agent.tools['scholar'].search(arguments["query"])
-                else: # both
+                    self.update_research_progress("Connecting to Google Scholar...")
+                    self.update_research_progress("Searching peer-reviewed research...")
+                    result = self.search_agent.tools['scholar'].search(arguments["query"])
+                    self.update_research_progress(f"Google Scholar search complete - found {len(result)} characters")
+
+                else: # both sources
+                    self.update_research_progress("Connecting to arXiv preprint server...")
+                    self.update_research_progress("Searching academic papers on arXiv...")
                    arxiv_result = self.search_agent.tools['arxiv'].search(arguments["query"])
+                    self.update_research_progress(f"arXiv complete ({len(arxiv_result)} chars) - now searching Google Scholar...")
+
+                    self.update_research_progress("Connecting to Google Scholar...")
+                    self.update_research_progress("Searching peer-reviewed research...")
                    scholar_result = self.search_agent.tools['scholar'].search(arguments["query"])
-                    return f"{arxiv_result}\n\n{scholar_result}"
+                    self.update_research_progress("Combining arXiv and Google Scholar results...")
+
+                    result = f"{arxiv_result}\n\n{scholar_result}"
+                    self.update_research_progress(f"Academic search complete - combined {len(result)} characters")
 
            elif function_name == "search_technology_trends":
-                return self.search_agent.tools['github'].search(arguments["technology"])
+                self.update_research_progress("Connecting to GitHub API...")
+                self.update_research_progress("Analyzing technology trends and repository data...")
+                result = self.search_agent.tools['github'].search(arguments["technology"])
+                self.update_research_progress(f"Technology trends analysis complete - found {len(result)} characters")
 
            elif function_name == "search_financial_data":
-                return self.search_agent.tools['sec'].search(arguments["company"])
+                self.update_research_progress("Connecting to SEC EDGAR database...")
+                self.update_research_progress("Searching financial filings and reports...")
+                result = self.search_agent.tools['sec'].search(arguments["company"])
+                self.update_research_progress(f"Financial data search complete - found {len(result)} characters")
 
            elif function_name == "multi_source_research":
-                return self.search_agent.search(arguments["query"], "deep")
+                self.update_research_progress("Initializing multi-source deep research...")
+                self.update_research_progress("Phase 1: Web search...")
+
+                # Show progress for each source in deep research
+                result = ""
+                try:
+                    # Simulate the deep research process with progress updates
+                    self.update_research_progress("Phase 1: Comprehensive web search...")
+                    web_result = self.search_agent.search(arguments["query"], "standard")
+                    self.update_research_progress(f"Web search complete ({len(web_result)} chars) - Phase 2: Academic sources...")
+
+                    self.update_research_progress("Phase 2: Searching academic databases...")
+                    # Add small delay to show progress
+                    time.sleep(1)
+
+                    self.update_research_progress("Phase 3: Analyzing and synthesizing results...")
+                    result = self.search_agent.search(arguments["query"], "deep")
+                    self.update_research_progress(f"Multi-source research complete - synthesized {len(result)} characters")
+
+                except Exception as e:
+                    self.update_research_progress(f"Multi-source research error: {str(e)}")
+                    result = f"Multi-source research encountered an error: {str(e)}"
 
            else:
-                return f"Unknown research function: {function_name}"
+                self.update_research_progress(f"Unknown research function: {function_name}")
+                result = f"Unknown research function: {function_name}"
+
+            # Phase 3: Show research ACTUALLY complete (after execution)
+            if query_param:
+                self.show_research_complete(function_name, query_param, len(result))
 
+            return result
+
        except Exception as e:
-            return f"Research function error: {str(e)}"
+            error_msg = str(e)
+            if query_param:
+                self.show_research_error(function_name, query_param, error_msg)
+            return f"Research function error: {error_msg}"
 
+    def show_research_starting(self, function: str, query: str):
+        """Show research request initiation with enhanced messaging"""
+        session = get_or_create_session_state(self.session_id)
+        current_state = session["roundtable_state"]
+        all_messages = list(current_state.get("messages", []))
+        participants = current_state.get("participants", [])
+
+        existing_bubbles = list(current_state.get("showBubbles", []))
+        # Ensure both Research Agent AND the calling model stay visible
+        if "Research Agent" not in existing_bubbles:
+            existing_bubbles.append("Research Agent")
+        # Keep the current speaker (the one who requested research) visible
+        current_speaker = current_state.get("currentSpeaker")
+        if current_speaker and current_speaker not in existing_bubbles and current_speaker != "Research Agent":
+            existing_bubbles.append(current_speaker)
+
+        # Enhanced messages based on function type
+        function_descriptions = {
+            "search_web": "🌐 Web Search - Real-time information",
+            "search_wikipedia": "📚 Wikipedia - Authoritative encyclopedia",
+            "search_academic": "🎓 Academic Research - Peer-reviewed papers",
+            "search_technology_trends": "💻 Technology Trends - GitHub analysis",
+            "search_financial_data": "💰 Financial Data - SEC filings",
+            "multi_source_research": "🔬 Deep Research - Multiple sources"
+        }
+
+        function_desc = function_descriptions.get(function, function.replace('_', ' ').title())
+
+        estimated_time = self.estimate_research_time(function)
+        message = {
+            "speaker": "Research Agent",
+            "text": f"🔍 **Initiating Research**\n{function_desc}\n📝 Query: \"{query}\"\n⏰ Estimated time: {estimated_time}\n⏳ Connecting to data sources...",
+            "type": "research_starting"
+        }
+        all_messages.append(message)
+
+        self.update_visual_state({
+            "participants": participants,
+            "messages": all_messages,
+            "currentSpeaker": None,
+            "thinking": [],
+            "showBubbles": existing_bubbles + ["Research Agent"]
+        })
+        time.sleep(0.5)
+
+    def show_research_complete(self, function: str, query: str, result_length: int):
+        """Show research ACTUALLY completed with data quality indicators"""
+        session = get_or_create_session_state(self.session_id)
+        current_state = session["roundtable_state"]
+        all_messages = list(current_state.get("messages", []))
+        participants = current_state.get("participants", [])
+
+        existing_bubbles = list(current_state.get("showBubbles", []))
+        # Ensure both Research Agent AND the calling model stay visible
+        if "Research Agent" not in existing_bubbles:
+            existing_bubbles.append("Research Agent")
+        # Keep the current speaker (the one who requested research) visible
+        current_speaker = current_state.get("currentSpeaker")
+        if current_speaker and current_speaker not in existing_bubbles and current_speaker != "Research Agent":
+            existing_bubbles.append(current_speaker)
+
+        # Determine data quality based on result length
+        if result_length > 2000:
+            quality_indicator = "📊 High-quality data (comprehensive)"
+            quality_emoji = "🎯"
+        elif result_length > 800:
+            quality_indicator = "📈 Good data (substantial)"
+            quality_emoji = "✅"
+        elif result_length > 200:
+            quality_indicator = "📋 Moderate data (basic)"
+            quality_emoji = "⚠️"
+        else:
+            quality_indicator = "📄 Limited data (minimal)"
+            quality_emoji = "⚡"
+
+        # Function-specific completion messages
+        function_summaries = {
+            "search_web": "Live web data retrieved",
+            "search_wikipedia": "Encyclopedia articles found",
+            "search_academic": "Academic papers analyzed",
+            "search_technology_trends": "Tech trends mapped",
+            "search_financial_data": "Financial reports accessed",
+            "multi_source_research": "Multi-source synthesis complete"
+        }
+
+        function_summary = function_summaries.get(function, "Research complete")
+
+        message = {
+            "speaker": "Research Agent",
+            "text": f"✅ **Research Complete**\n🔬 {function.replace('_', ' ').title()}\n📝 Query: \"{query}\"\n{quality_emoji} {function_summary}\n{quality_indicator}\n📊 {result_length:,} characters analyzed\n🎯 Data ready for expert analysis",
+            "type": "research_complete"
+        }
+        all_messages.append(message)
+
+        self.update_visual_state({
+            "participants": participants,
+            "messages": all_messages,
+            "currentSpeaker": None,
+            "thinking": [],
+            "showBubbles": existing_bubbles + ["Research Agent"]
+        })
+        time.sleep(1.5)  # Longer pause to show the detailed completion
+
+    def estimate_research_time(self, function_name: str) -> str:
+        """Provide time estimates for different research functions"""
+        time_estimates = {
+            "search_web": "30-60 seconds",
+            "search_wikipedia": "15-30 seconds",
+            "search_academic": "2-5 minutes",
+            "search_technology_trends": "1-2 minutes",
+            "search_financial_data": "1-3 minutes",
+            "multi_source_research": "3-7 minutes"
+        }
+        return time_estimates.get(function_name, "1-3 minutes")
+
+    def show_research_error(self, function: str, query: str, error: str):
+        """Show research error"""
+        session = get_or_create_session_state(self.session_id)
+        current_state = session["roundtable_state"]
+        all_messages = list(current_state.get("messages", []))
+        participants = current_state.get("participants", [])
+
+        existing_bubbles = list(current_state.get("showBubbles", []))
+        # Ensure both Research Agent AND the calling model stay visible
+        if "Research Agent" not in existing_bubbles:
+            existing_bubbles.append("Research Agent")
+        # Keep the current speaker (the one who requested research) visible
+        current_speaker = current_state.get("currentSpeaker")
+        if current_speaker and current_speaker not in existing_bubbles and current_speaker != "Research Agent":
+            existing_bubbles.append(current_speaker)
+
+        message = {
+            "speaker": "Research Agent",
+            "text": f"❌ **Research Error**: {function.replace('_', ' ').title()}\n📝 Query: \"{query}\"\n⚠️ Error: {error}\n🔄 Continuing with available data",
+            "type": "research_error"
+        }
+        all_messages.append(message)
+
+        self.update_visual_state({
+            "participants": participants,
+            "messages": all_messages,
+            "currentSpeaker": None,
+            "thinking": [],
+            "showBubbles": existing_bubbles + ["Research Agent"]
+        })
+        time.sleep(1)
+
+    def update_research_progress(self, progress_text: str):
+        """Update research progress in real-time - ALWAYS REMOVE RESEARCH AGENT FROM THINKING"""
+        session = get_or_create_session_state(self.session_id)
+        current_state = session["roundtable_state"]
+        all_messages = list(current_state.get("messages", []))
+        participants = current_state.get("participants", [])
+
+        existing_bubbles = list(current_state.get("showBubbles", []))
+        if "Research Agent" not in existing_bubbles:
+            existing_bubbles.append("Research Agent")
+
+        progress_message = {
+            "speaker": "Research Agent",
+            "text": f"🔄 {progress_text}",
+            "type": "research_progress"
+        }
+        all_messages.append(progress_message)
+
+        # Get current thinking and ALWAYS remove Research Agent
+        current_thinking = list(current_state.get("thinking", []))
+        if "Research Agent" in current_thinking:
+            current_thinking.remove("Research Agent")
+
+        self.update_visual_state({
+            "participants": participants,
+            "messages": all_messages,
+            "currentSpeaker": None,
+            "thinking": current_thinking,  # Research Agent NEVER in thinking
+            "showBubbles": existing_bubbles
+        })
+        time.sleep(0.3)
+
    def call_model(self, model: str, prompt: str, context: str = "") -> Optional[str]:
        """Enhanced model calling with native function calling support"""
        if not self.models[model]['available']:
@@ -516,7 +696,7 @@ class VisualConsensusEngine:
 
            # Check if model supports function calling
            supports_functions = sambanova_model in [
-                'DeepSeek-R1-0324',
+                'DeepSeek-V3-0324',
                'Meta-Llama-3.1-8B-Instruct',
                'Meta-Llama-3.1-405B-Instruct',
                'Meta-Llama-3.3-70B-Instruct'
@@ -727,8 +907,9 @@ class VisualConsensusEngine:
            # Log and set thinking state - PRESERVE BUBBLES
            log_event('thinking', speaker=self.models[model]['name'])
 
-            # Calculate existing bubbles
-            existing_bubbles = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+            session = get_or_create_session_state(self.session_id)
+            current_state = session["roundtable_state"]
+            existing_bubbles = list(current_state.get("showBubbles", []))
 
            self.update_visual_state({
                "participants": visual_participant_names,
@@ -776,7 +957,7 @@ Provide your expert analysis:"""
            log_event('speaking', speaker=self.models[model]['name'])
 
            # Calculate existing bubbles
-            existing_bubbles = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+            existing_bubbles = list(current_state.get("showBubbles", []))
 
            self.update_visual_state({
                "participants": visual_participant_names,
@@ -853,7 +1034,7 @@ Provide your expert analysis:"""
            # Log thinking with preserved bubbles
            log_event('thinking', speaker=self.models[model]['name'])
 
-            existing_bubbles = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+            existing_bubbles = list(current_state.get("showBubbles", []))
 
            self.update_visual_state({
                "participants": visual_participant_names,
@@ -902,7 +1083,7 @@ Your expert response:"""
            # Log speaking with preserved bubbles
            log_event('speaking', speaker=self.models[model]['name'])
 
-            existing_bubbles = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+            existing_bubbles = list(current_state.get("showBubbles", []))
 
            self.update_visual_state({
                "participants": visual_participant_names,
@@ -979,7 +1160,7 @@ Your expert response:"""
        expert_names = [self.models[model]['name'] for model in available_models]
 
        # Preserve existing bubbles during final thinking
-        existing_bubbles = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+        existing_bubbles = list(current_state.get("showBubbles", []))
 
        self.update_visual_state({
            "participants": visual_participant_names,
@@ -1057,7 +1238,7 @@ Provide your synthesis:"""
        log_event('speaking', speaker=moderator_title, content="Synthesizing expert analysis into final recommendation...")
 
        # Preserve existing bubbles during final speaking
-        existing_bubbles = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+        existing_bubbles = list(current_state.get("showBubbles", []))
 
        self.update_visual_state({
            "participants": visual_participant_names,