#!/bin/bash
# Script to add model tooltips and fix existing tooltips in all HTML files

# Model tooltip definitions - exact descriptions from cost analysis tab
declare -A model_tooltips
model_tooltips["OpenAI gpt-4o"]="OpenAI's flagship multimodal model optimized for a balance of quality and speed. Features strong performance across diverse tasks with capabilities for complex financial reasoning and instruction following."
model_tooltips["GPT-4o"]="OpenAI's flagship multimodal model optimized for a balance of quality and speed. Features strong performance across diverse tasks with capabilities for complex financial reasoning and instruction following."
model_tooltips["OpenAI o1-mini"]="OpenAI's smaller advanced model balancing efficiency and performance. Demonstrates surprisingly strong results on financial tasks despite its reduced parameter count."
model_tooltips["o1-mini"]="OpenAI's smaller advanced model balancing efficiency and performance. Demonstrates surprisingly strong results on financial tasks despite its reduced parameter count."
model_tooltips["Claude 3.5 Sonnet"]="Anthropic's advanced proprietary language model optimized for complex reasoning and instruction-following. Features enhanced performance on financial tasks with strong text processing capabilities."
model_tooltips["Claude 3 Haiku"]="Anthropic's smaller efficiency-focused model in the Claude family. Designed for speed and lower computational requirements while maintaining reasonable performance on financial tasks."
model_tooltips["Google Gemini 1.5 Pro"]="Google's advanced proprietary multimodal model designed for complex reasoning and instruction-following tasks. Features strong performance across financial domains with advanced reasoning capabilities."
model_tooltips["Gemini 1.5 Pro"]="Google's advanced proprietary multimodal model designed for complex reasoning and instruction-following tasks. Features strong performance across financial domains with advanced reasoning capabilities."
model_tooltips["Cohere Command R 7B"]="Cohere's 7-billion parameter model focused on instruction-following. An efficient model with reasonable financial domain capabilities for its size."
model_tooltips["Cohere Command R +"]="Cohere's enhanced command model with improved instruction-following capabilities. Features advanced reasoning for financial domains with stronger performance than its smaller counterpart."
model_tooltips["DeepSeek R1"]="DeepSeek's premium 671 billion parameter Mixture of Experts (MoE) model representing their most advanced offering. Designed for state-of-the-art performance across complex reasoning and financial tasks."
model_tooltips["DeepSeek-V3"]="DeepSeek's 685 billion parameter Mixture of Experts (MoE) model optimized for advanced reasoning. Strong performance on financial tasks with robust instruction-following capabilities."
model_tooltips["DeepSeek LLM (67B)"]="DeepSeek's 67 billion parameter model optimized for chat applications. Balances performance and efficiency across financial tasks with solid reasoning capabilities."
model_tooltips["Llama 3 70B Instruct"]="Meta's advanced 70 billion parameter dense language model optimized for instruction-following tasks. Available through Together AI and notable for complex reasoning capabilities."
model_tooltips["Llama 3 8B Instruct"]="Meta's efficient 8 billion parameter language model optimized for instruction-following. Balances performance and efficiency for financial tasks with reasonable reasoning capabilities."
model_tooltips["DBRX Instruct"]="Databricks' 132 billion parameter Mixture of Experts (MoE) model focused on advanced reasoning. Demonstrates competitive performance on financial tasks with strong text processing capabilities." model_tooltips["Mixtral-8x22B Instruct"]="Mistral AI's 141 billion parameter MoE model with eight 22B expert networks. Features robust reasoning capabilities for financial tasks with strong instruction-following performance." model_tooltips["Mixtral-8x7B Instruct"]="Mistral AI's 47 billion parameter MoE model with eight 7B expert networks. Balances efficiency and performance with reasonable financial reasoning capabilities." model_tooltips["Mistral (7B) Instruct v0.3"]="Mistral AI's 7 billion parameter instruction-tuned model. Demonstrates impressive efficiency with reasonable performance on financial tasks despite its smaller size." model_tooltips["Qwen 2 Instruct (72B)"]="Alibaba's 72 billion parameter instruction-following model optimized for reasoning tasks. Features strong performance on financial domains with advanced text processing capabilities." model_tooltips["WizardLM-2 8x22B"]="A 176 billion parameter MoE model focused on complex reasoning. Designed for advanced instruction-following with strong capabilities across financial tasks." model_tooltips["Gemma 2 27B"]="Google's open-weight 27 billion parameter model optimized for reasoning tasks. Balances performance and efficiency across financial domains with strong instruction-following." model_tooltips["Gemma 2 9B"]="Google's efficient open-weight 9 billion parameter model. Demonstrates good performance on financial tasks relative to its smaller size." model_tooltips["QwQ-32B-Preview"]="Qwen's experimental 32 billion parameter MoE model focused on efficient computation. Features interesting performance characteristics on certain financial tasks." model_tooltips["Jamba 1.5 Mini"]="A compact variant in the Jamba model series focused on efficiency. Balances performance and computational requirements for financial tasks." model_tooltips["Jamba 1.5 Large"]="An expanded variant in the Jamba model series with enhanced capabilities. Features stronger reasoning for financial tasks than its smaller counterpart." 
# Files to process
files=(
    "text_classification_table.html"
    "sentiment_analysis_table.html"
    "information_retrieval_table.html"
    "causal_analysis_table.html"
    "text_summarization_table.html"
    "qa_table.html"
)

# Fix existing dataset tooltips

# Fix tooltips in information_retrieval_table.html
sed -i 's/tooltip-trigger" data-tooltip="A dataset for information retrieval in the financial domain/tooltip-trigger tooltip-right" data-tooltip="A dataset for information retrieval in the financial domain/g' information_retrieval_table.html

# Fix tooltips in text_classification_table.html
sed -i 's/tooltip-trigger" data-tooltip="An expert-annotated dataset for detecting fine-grained investor claims/tooltip-trigger tooltip-right" data-tooltip="An expert-annotated dataset for detecting fine-grained investor claims/g' text_classification_table.html

# Fix tooltips in causal_analysis_table.html (add the tooltip-right class if it is missing)
sed -i 's/tooltip-trigger" data-tooltip="Determines if a given financial text section contains a causal relation/tooltip-trigger tooltip-right" data-tooltip="Determines if a given financial text section contains a causal relation/g' causal_analysis_table.html

# Fix tooltips in sentiment_analysis_table.html (add the tooltip-right class if it is missing)
sed -i 's/tooltip-trigger" data-tooltip="Manually-annotated dataset focusing on subjectivity/tooltip-trigger tooltip-right" data-tooltip="Manually-annotated dataset focusing on subjectivity/g' sentiment_analysis_table.html

# Fix tooltips in text_summarization_table.html (add the tooltip-right class if it is missing)
sed -i 's/tooltip-trigger" data-tooltip="Financial news summarization dataset with 2,000 financial news articles/tooltip-trigger tooltip-right" data-tooltip="Financial news summarization dataset with 2,000 financial news articles/g' text_summarization_table.html

# Add or update model tooltips in each file
for file in "${files[@]}"; do
    echo "Processing $file..."

    # For each model in our list
    for model in "${!model_tooltips[@]}"; do
        # Convert model name and tooltip to sed-safe strings by escaping special characters
        model_sed_safe=$(echo "$model" | sed 's/[\/&]/\\&/g')
        tooltip_sed_safe=$(echo "${model_tooltips[$model]}" | sed 's/[\/&]/\\&/g')

        # First, update existing tooltips if they exist
        sed -i "s/data-title=\"$model_sed_safe\" data-tooltip=\"[^\"]*\"/data-title=\"$model_sed_safe\" data-tooltip=\"$tooltip_sed_safe\"/g" "$file"

        # Then, wrap plain model-name cells that have no tooltip yet, using the
        # same tooltip-trigger/data-title/data-tooltip markup as above
        sed -i "s/<td>$model_sed_safe<\/td>/<td class=\"tooltip-trigger\" data-title=\"$model_sed_safe\" data-tooltip=\"$tooltip_sed_safe\">$model_sed_safe<\/td>/g" "$file"
    done

    # Ensure the tooltip scripts are included at the bottom of the file
    # (asset paths assumed to match the static/ layout used for model-tooltips.js below)
    if ! grep -q "tooltips.js" "$file"; then
        echo "<script src=\"static/js/tooltips.js\"></script>" >> "$file"
    fi
    if ! grep -q "fixed-tooltips.js" "$file"; then
        echo "<script src=\"static/js/fixed-tooltips.js\"></script>" >> "$file"
    fi

    # Add tooltips.css if not already included
    if ! grep -q "tooltips.css" "$file"; then
        sed -i '1i<link rel="stylesheet" href="static/css/tooltips.css">' "$file"
    fi
done
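
# Optional sanity check: count the data-tooltip attributes each table now
# carries, so a model the sed patterns missed is easy to spot. grep -o counts
# every occurrence, even when a whole table row sits on one line.
for file in "${files[@]}"; do
    count=$(grep -o 'data-tooltip=' "$file" | wc -l)
    echo "  $file: $count data-tooltip attributes"
done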
# Also update results.html to ensure proper tooltip handling
echo "Adding tooltip fix to results.html..."

# Copy the model tooltip fixing code for all tabs to a new JS file
# (quoted heredoc so bash leaves the JavaScript untouched)
cat > static/js/model-tooltips.js << 'EOF'
document.addEventListener('DOMContentLoaded', function() {
    // Fix model tooltips in all tabs
    function fixAllModelTooltips() {
        console.log("Fixing model tooltips in all tabs");

        // Find all model name cells (first column in all tables)
        const modelCells = document.querySelectorAll('td:first-child');

        // Process each model cell
        modelCells.forEach(cell => {
            // Skip cells that already have tooltips
            if (cell.classList.contains('tooltip-trigger')) {
                return;
            }

            // Get the model name
            const modelName = cell.textContent.trim();

            // Add tooltip-trigger class and position style
            cell.classList.add('tooltip-trigger');
            cell.style.position = 'relative';

            // Add data-title attribute with the model name
            cell.setAttribute('data-title', modelName);

            // Set a descriptive tooltip based on the model name
            let tooltipText = "";
            if (modelName.includes("GPT-4o")) {
                tooltipText = "OpenAI's advanced proprietary closed-source model. One of the top performers across most tasks.";
            } else if (modelName.includes("o1-mini")) {
                tooltipText = "Compact proprietary model from OpenAI. Shows strong performance on causal analysis tasks.";
            } else if (modelName.includes("Claude 3.5 Sonnet")) {
                tooltipText = "Anthropic's model optimized for advanced reasoning. Strong performer on text classification and summarization.";
            } else if (modelName.includes("Claude 3 Haiku")) {
                tooltipText = "Anthropic's smaller, efficiency-focused model in the Claude series.";
            } else if (modelName.includes("Gemini 1.5")) {
                tooltipText = "Google's highly capable proprietary model.";
            } else if (modelName.includes("Command R 7B")) {
                tooltipText = "A 7-billion parameter model from Cohere focused on instruction-following.";
            } else if (modelName.includes("Command R +")) {
                tooltipText = "An improved version of Cohere's Command R model.";
            } else if (modelName.includes("DeepSeek R1")) {
                tooltipText = "Open-weight model from DeepSeek AI with 671B parameters (MoE architecture). One of the top performers in the benchmark.";
            } else if (modelName.includes("DeepSeek-V3") || modelName.includes("DeepSeek V3")) {
                tooltipText = "Open-weight model from DeepSeek AI with 685B parameters (MoE architecture).";
            } else if (modelName.includes("DeepSeek LLM")) {
                tooltipText = "A 67-billion parameter chat-optimized model from DeepSeek AI.";
            } else if (modelName.includes("Llama 3 70B")) {
                tooltipText = "Meta's 70-billion parameter dense model, optimized for instruction-following tasks.";
            } else if (modelName.includes("Llama 3 8B")) {
                tooltipText = "Meta's 8-billion parameter efficient model variant.";
            } else if (modelName.includes("DBRX")) {
                tooltipText = "Databricks' 132B parameter MoE model.";
            } else if (modelName.includes("Mixtral-8x22B")) {
                tooltipText = "141B parameter MoE model from Mistral AI with eight 22-billion parameter sub-models.";
            } else if (modelName.includes("Mixtral-8x7B")) {
                tooltipText = "46.7B parameter MoE model from Mistral AI with eight 7-billion parameter sub-models.";
            } else if (modelName.includes("Mistral")) {
                tooltipText = "A 7-billion parameter instruction-tuned model from Mistral AI.";
            } else if (modelName.includes("Qwen 2")) {
                tooltipText = "Alibaba's 72-billion parameter instruction-following model.";
            } else if (modelName.includes("WizardLM")) {
                tooltipText = "A 176B parameter MoE model focused on complex reasoning.";
            } else if (modelName.includes("Gemma 2 27B")) {
                tooltipText = "Google's open-weight 27B parameter model.";
            } else if (modelName.includes("Gemma 2 9B")) {
                tooltipText = "Google's open-weight 9B parameter efficient model.";
            } else if (modelName.includes("QwQ-32B")) {
                tooltipText = "Qwen's experimental MoE model with 32B parameters.";
            } else if (modelName.includes("Jamba 1.5 Mini")) {
                tooltipText = "A compact variant of the Jamba model series.";
            } else if (modelName.includes("Jamba 1.5 Large")) {
                tooltipText = "An expanded variant of the Jamba model series.";
            } else {
                tooltipText = "A large language model from the FLaME evaluation benchmark.";
            }

            // Set the tooltip
            cell.setAttribute('data-tooltip', tooltipText);
        });

        // After adding attributes, run the tooltip fix
        if (window.fixProblemTooltips) {
            window.fixProblemTooltips();
        }
    }

    // Run on page load
    setTimeout(fixAllModelTooltips, 500);

    // Run when tabs are clicked
    const tabs = document.querySelectorAll('.tabs li');
    tabs.forEach(tab => {
        tab.addEventListener('click', () => {
            // Give time for content to be displayed
            setTimeout(fixAllModelTooltips, 200);
        });
    });
});
EOF

# Add script inclusion to results.html if not already there
if ! grep -q "model-tooltips.js" "results.html"; then
    # Add the script link before the closing body tag
    sed -i 's/<\/body>/<script src="static\/js\/model-tooltips.js"><\/script>\n<\/body>/' results.html
fi
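
# Optional verification: confirm results.html now loads the generated script.
if grep -q "model-tooltips.js" results.html; then
    echo "results.html links model-tooltips.js"
else
    echo "WARNING: model-tooltips.js is not linked in results.html" >&2
fi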