<link rel="stylesheet" href="static/css/tooltips.css">
<style>
.tooltip-right:hover::after {
left: auto;
right: 100%;
margin-left: 0;
margin-right: 10px;
}
</style>
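<!-- The inline override above repositions the hover tooltip for headers tagged with the "tooltip-right" class so it opens to the left of the header (right: 100%) instead of in the default position defined in static/css/tooltips.css, presumably to keep tooltips on the right-most columns from overflowing the viewport. -->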
<!-- Information Retrieval -->
<div id="information-retrieval" class="tab-content">
<h2 class="title is-4">Information Retrieval Task Results</h2>
<div class="results-table">
<table class="table is-bordered is-striped is-narrow is-hoverable is-fullwidth">
<thead>
<tr>
<th rowspan="2">Model</th>
<th colspan="4" class="has-text-centered tooltip-trigger" data-title="FiNER-ORD" data-tooltip="FiNER-ORD is a manually annotated named entity recognition dataset comprising financial news articles with detailed entity annotations. The task requires identifying and correctly classifying person, location, and organization entities in financial contexts.">FiNER-ORD</th>
<th colspan="4" class="has-text-centered tooltip-trigger" data-title="FinRED" data-tooltip="FinRED is a specialized relation extraction dataset created from financial news and earnings call transcripts using distance supervision based on Wikidata triplets. The task involves identifying and extracting financial relationships between entities to understand connections in financial contexts.">FinRED</th> | |
<th colspan="4" class="has-text-centered tooltip-trigger tooltip-right" style="position: relative;" data-title="ReFiND" data-tooltip="ReFiND is a comprehensive relation extraction dataset containing approximately 29,000 annotated instances with 22 distinct relation types across 8 entity pair categories from various financial documents. The task requires identifying specific relationships between financial entities in complex documents like SEC filings.">ReFiND</th> | |
<th colspan="4" class="has-text-centered tooltip-trigger tooltip-right" data-title="FNXL" data-tooltip="FNXL contains 79,088 sentences with 142,922 annotated numerals extracted from SEC 10-K reports and categorized under 2,794 distinct numerical labels. The information extraction task requires identifying, categorizing and understanding the financial significance of numerical entities in regulatory filings.">FNXL</th> | |
<th colspan="4" class="has-text-centered tooltip-trigger tooltip-right" data-title="FinEntity" data-tooltip="FinEntity consists of 979 financial news paragraphs containing 2,131 manually-annotated financial entities with sentiment classifications. The task involves identifying companies and asset classes in financial texts while determining the associated sentiment expressed toward each entity.">FinEntity</th> | |
</tr> | |
<tr> | |
<th class="has-text-centered">Precision</th> | |
<th class="has-text-centered">Recall</th> | |
<th class="has-text-centered">F1</th> | |
<th class="has-text-centered">Accuracy</th> | |
<th class="has-text-centered">Accuracy</th> | |
<th class="has-text-centered">Precision</th> | |
<th class="has-text-centered">Recall</th> | |
<th class="has-text-centered">F1</th> | |
<th class="has-text-centered">Accuracy</th> | |
<th class="has-text-centered">Precision</th> | |
<th class="has-text-centered">Recall</th> | |
<th class="has-text-centered">F1</th> | |
<th class="has-text-centered">Precision</th> | |
<th class="has-text-centered">Recall</th> | |
<th class="has-text-centered">F1</th> | |
<th class="has-text-centered">Accuracy</th> | |
<th class="has-text-centered">Precision</th> | |
<th class="has-text-centered">Recall</th> | |
<th class="has-text-centered">Accuracy</th> | |
<th class="has-text-centered">F1</th> | |
</tr> | |
</thead> | |
<tbody>
<tr>
<td class="tooltip-trigger" data-title="Llama 3 70B Instruct" data-tooltip="Meta's advanced 70 billion parameter dense language model optimized for instruction-following tasks. Available through Together AI and notable for complex reasoning capabilities.">Llama 3 70B Instruct</td>
<td class="has-text-centered">0.715</td>
<td class="has-text-centered">0.693</td>
<td class="has-text-centered">0.701</td>
<td class="has-text-centered">0.911</td>
<td class="has-text-centered">0.314</td>
<td class="has-text-centered performance-medium">0.454</td>
<td class="has-text-centered">0.314</td>
<td class="has-text-centered">0.332</td>
<td class="has-text-centered">0.879</td>
<td class="has-text-centered">0.904</td>
<td class="has-text-centered">0.879</td>
<td class="has-text-centered">0.883</td>
<td class="has-text-centered">0.015</td>
<td class="has-text-centered">0.030</td>
<td class="has-text-centered">0.020</td>
<td class="has-text-centered">0.010</td>
<td class="has-text-centered">0.474</td>
<td class="has-text-centered">0.485</td>
<td class="has-text-centered">0.485</td>
<td class="has-text-centered">0.469</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Llama 3 8B Instruct" data-tooltip="Meta's efficient 8 billion parameter language model optimized for instruction-following. Balances performance and efficiency for financial tasks with reasonable reasoning capabilities.">Llama 3 8B Instruct</td>
<td class="has-text-centered">0.581</td>
<td class="has-text-centered">0.558</td>
<td class="has-text-centered">0.565</td>
<td class="has-text-centered">0.854</td>
<td class="has-text-centered">0.296</td>
<td class="has-text-centered">0.357</td>
<td class="has-text-centered">0.296</td>
<td class="has-text-centered">0.289</td>
<td class="has-text-centered">0.723</td>
<td class="has-text-centered">0.755</td>
<td class="has-text-centered">0.723</td>
<td class="has-text-centered">0.705</td>
<td class="has-text-centered">0.003</td>
<td class="has-text-centered">0.004</td>
<td class="has-text-centered">0.003</td>
<td class="has-text-centered">0.002</td>
<td class="has-text-centered">0.301</td>
<td class="has-text-centered">0.478</td>
<td class="has-text-centered">0.478</td>
<td class="has-text-centered">0.350</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="DBRX Instruct" data-tooltip="Databricks' 132 billion parameter Mixture of Experts (MoE) model focused on advanced reasoning. Demonstrates competitive performance on financial tasks with strong text processing capabilities.">DBRX Instruct</td>
<td class="has-text-centered">0.516</td>
<td class="has-text-centered">0.476</td>
<td class="has-text-centered">0.489</td>
<td class="has-text-centered">0.802</td>
<td class="has-text-centered">0.329</td>
<td class="has-text-centered">0.371</td>
<td class="has-text-centered">0.329</td>
<td class="has-text-centered">0.304</td>
<td class="has-text-centered">0.766</td>
<td class="has-text-centered">0.825</td>
<td class="has-text-centered">0.766</td>
<td class="has-text-centered">0.778</td>
<td class="has-text-centered">0.008</td>
<td class="has-text-centered">0.011</td>
<td class="has-text-centered">0.009</td>
<td class="has-text-centered">0.005</td>
<td class="has-text-centered">0.004</td>
<td class="has-text-centered">0.014</td>
<td class="has-text-centered">0.014</td>
<td class="has-text-centered">0.006</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="DeepSeek LLM (67B)" data-tooltip="DeepSeek's 67 billion parameter model optimized for chat applications. Balances performance and efficiency across financial tasks with solid reasoning capabilities.">DeepSeek LLM (67B)</td>
<td class="has-text-centered">0.752</td>
<td class="has-text-centered">0.742</td>
<td class="has-text-centered">0.745</td>
<td class="has-text-centered">0.917</td>
<td class="has-text-centered">0.344</td>
<td class="has-text-centered">0.403</td>
<td class="has-text-centered">0.344</td>
<td class="has-text-centered">0.334</td>
<td class="has-text-centered">0.874</td>
<td class="has-text-centered">0.890</td>
<td class="has-text-centered">0.874</td>
<td class="has-text-centered">0.879</td>
<td class="has-text-centered">0.005</td>
<td class="has-text-centered">0.009</td>
<td class="has-text-centered">0.007</td>
<td class="has-text-centered">0.003</td>
<td class="has-text-centered">0.456</td>
<td class="has-text-centered">0.405</td>
<td class="has-text-centered">0.405</td>
<td class="has-text-centered">0.416</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Gemma 2 27B" data-tooltip="Google's open-weight 27 billion parameter model optimized for reasoning tasks. Balances performance and efficiency across financial domains with strong instruction-following.">Gemma 2 27B</td>
<td class="has-text-centered">0.772</td>
<td class="has-text-centered">0.754</td>
<td class="has-text-centered">0.761</td>
<td class="has-text-centered performance-medium">0.923</td>
<td class="has-text-centered">0.352</td>
<td class="has-text-centered">0.437</td>
<td class="has-text-centered">0.352</td>
<td class="has-text-centered">0.356</td>
<td class="has-text-centered">0.897</td>
<td class="has-text-centered">0.914</td>
<td class="has-text-centered">0.897</td>
<td class="has-text-centered">0.902</td>
<td class="has-text-centered">0.005</td>
<td class="has-text-centered">0.008</td>
<td class="has-text-centered">0.006</td>
<td class="has-text-centered">0.003</td>
<td class="has-text-centered">0.320</td>
<td class="has-text-centered">0.295</td>
<td class="has-text-centered">0.295</td>
<td class="has-text-centered">0.298</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Gemma 2 9B" data-tooltip="Google's efficient open-weight 9 billion parameter model. Demonstrates good performance on financial tasks relative to its smaller size.">Gemma 2 9B</td>
<td class="has-text-centered">0.665</td>
<td class="has-text-centered">0.643</td>
<td class="has-text-centered">0.651</td>
<td class="has-text-centered">0.886</td>
<td class="has-text-centered">0.336</td>
<td class="has-text-centered">0.373</td>
<td class="has-text-centered">0.336</td>
<td class="has-text-centered">0.331</td>
<td class="has-text-centered">0.885</td>
<td class="has-text-centered">0.902</td>
<td class="has-text-centered">0.885</td>
<td class="has-text-centered">0.892</td>
<td class="has-text-centered">0.004</td>
<td class="has-text-centered">0.008</td>
<td class="has-text-centered">0.005</td>
<td class="has-text-centered">0.003</td>
<td class="has-text-centered">0.348</td>
<td class="has-text-centered">0.419</td>
<td class="has-text-centered">0.419</td>
<td class="has-text-centered">0.367</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Mistral (7B) Instruct v0.3" data-tooltip="Mistral AI's 7 billion parameter instruction-tuned model. Demonstrates impressive efficiency with reasonable performance on financial tasks despite its smaller size.">Mistral (7B) Instruct v0.3</td>
<td class="has-text-centered">0.540</td>
<td class="has-text-centered">0.522</td>
<td class="has-text-centered">0.526</td>
<td class="has-text-centered">0.806</td>
<td class="has-text-centered">0.278</td>
<td class="has-text-centered">0.383</td>
<td class="has-text-centered">0.278</td>
<td class="has-text-centered">0.276</td>
<td class="has-text-centered">0.767</td>
<td class="has-text-centered">0.817</td>
<td class="has-text-centered">0.767</td>
<td class="has-text-centered">0.771</td>
<td class="has-text-centered">0.004</td>
<td class="has-text-centered">0.006</td>
<td class="has-text-centered">0.004</td>
<td class="has-text-centered">0.002</td>
<td class="has-text-centered">0.337</td>
<td class="has-text-centered">0.477</td>
<td class="has-text-centered">0.477</td>
<td class="has-text-centered">0.368</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Mixtral-8x22B Instruct" data-tooltip="Mistral AI's 141 billion parameter MoE model with eight 22B expert networks. Features robust reasoning capabilities for financial tasks with strong instruction-following performance.">Mixtral-8x22B Instruct</td>
<td class="has-text-centered">0.653</td>
<td class="has-text-centered">0.625</td>
<td class="has-text-centered">0.635</td>
<td class="has-text-centered">0.870</td>
<td class="has-text-centered">0.381</td>
<td class="has-text-centered">0.414</td>
<td class="has-text-centered">0.381</td>
<td class="has-text-centered">0.367</td>
<td class="has-text-centered">0.807</td>
<td class="has-text-centered">0.847</td>
<td class="has-text-centered">0.807</td>
<td class="has-text-centered">0.811</td>
<td class="has-text-centered">0.010</td>
<td class="has-text-centered">0.008</td>
<td class="has-text-centered">0.009</td>
<td class="has-text-centered">0.005</td>
<td class="has-text-centered">0.428</td>
<td class="has-text-centered">0.481</td>
<td class="has-text-centered">0.481</td>
<td class="has-text-centered">0.435</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Mixtral-8x7B Instruct" data-tooltip="Mistral AI's 47 billion parameter MoE model with eight 7B expert networks. Balances efficiency and performance with reasonable financial reasoning capabilities.">Mixtral-8x7B Instruct</td>
<td class="has-text-centered">0.613</td>
<td class="has-text-centered">0.591</td>
<td class="has-text-centered">0.598</td>
<td class="has-text-centered">0.875</td>
<td class="has-text-centered">0.291</td>
<td class="has-text-centered">0.376</td>
<td class="has-text-centered">0.291</td>
<td class="has-text-centered">0.282</td>
<td class="has-text-centered">0.840</td>
<td class="has-text-centered">0.863</td>
<td class="has-text-centered">0.840</td>
<td class="has-text-centered">0.845</td>
<td class="has-text-centered">0.007</td>
<td class="has-text-centered">0.012</td>
<td class="has-text-centered">0.009</td>
<td class="has-text-centered">0.005</td>
<td class="has-text-centered">0.251</td>
<td class="has-text-centered">0.324</td>
<td class="has-text-centered">0.324</td>
<td class="has-text-centered">0.267</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Qwen 2 Instruct (72B)" data-tooltip="Alibaba's 72 billion parameter instruction-following model optimized for reasoning tasks. Features strong performance on financial domains with advanced text processing capabilities.">Qwen 2 Instruct (72B)</td>
<td class="has-text-centered">0.766</td>
<td class="has-text-centered">0.742</td>
<td class="has-text-centered">0.748</td>
<td class="has-text-centered">0.899</td>
<td class="has-text-centered">0.365</td>
<td class="has-text-centered">0.407</td>
<td class="has-text-centered">0.365</td>
<td class="has-text-centered">0.348</td>
<td class="has-text-centered">0.850</td>
<td class="has-text-centered">0.881</td>
<td class="has-text-centered">0.850</td>
<td class="has-text-centered">0.854</td>
<td class="has-text-centered">0.010</td>
<td class="has-text-centered">0.016</td>
<td class="has-text-centered">0.012</td>
<td class="has-text-centered">0.006</td>
<td class="has-text-centered">0.468</td>
<td class="has-text-centered">0.530</td>
<td class="has-text-centered">0.530</td>
<td class="has-text-centered">0.483</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="WizardLM-2 8x22B" data-tooltip="A 176 billion parameter MoE model focused on complex reasoning. Designed for advanced instruction-following with strong capabilities across financial tasks.">WizardLM-2 8x22B</td> | |
<td class="has-text-centered">0.755</td> | |
<td class="has-text-centered">0.741</td> | |
<td class="has-text-centered">0.744</td> | |
<td class="has-text-centered">0.920</td> | |
<td class="has-text-centered">0.362</td> | |
<td class="has-text-centered">0.397</td> | |
<td class="has-text-centered">0.362</td> | |
<td class="has-text-centered">0.355</td> | |
<td class="has-text-centered">0.846</td> | |
<td class="has-text-centered">0.874</td> | |
<td class="has-text-centered">0.846</td> | |
<td class="has-text-centered">0.852</td> | |
<td class="has-text-centered">0.008</td> | |
<td class="has-text-centered">0.009</td> | |
<td class="has-text-centered">0.008</td> | |
<td class="has-text-centered">0.004</td> | |
<td class="has-text-centered">0.222</td> | |
<td class="has-text-centered">0.247</td> | |
<td class="has-text-centered">0.247</td> | |
<td class="has-text-centered">0.226</td> | |
</tr> | |
<tr> | |
<td class="tooltip-trigger" data-title="DeepSeek-V3" data-tooltip="DeepSeek's 685 billion parameter Mixture of Experts (MoE) model optimized for advanced reasoning. Strong performance on financial tasks with robust instruction-following capabilities.">DeepSeek-V3</td> | |
<td class="has-text-centered performance-medium">0.798</td> | |
<td class="has-text-centered performance-medium">0.787</td> | |
<td class="has-text-centered performance-medium">0.790</td> | |
<td class="has-text-centered performance-best">0.945</td> | |
<td class="has-text-centered performance-strong">0.450</td> | |
<td class="has-text-centered performance-strong">0.463</td> | |
<td class="has-text-centered performance-strong">0.450</td> | |
<td class="has-text-centered performance-strong">0.437</td> | |
<td class="has-text-centered">0.927</td> | |
<td class="has-text-centered performance-medium">0.943</td> | |
<td class="has-text-centered">0.927</td> | |
<td class="has-text-centered">0.934</td> | |
<td class="has-text-centered performance-strong">0.034</td> | |
<td class="has-text-centered performance-medium">0.067</td> | |
<td class="has-text-centered performance-medium">0.045</td> | |
<td class="has-text-centered performance-medium">0.023</td> | |
<td class="has-text-centered">0.563</td> | |
<td class="has-text-centered">0.544</td> | |
<td class="has-text-centered">0.544</td> | |
<td class="has-text-centered">0.549</td> | |
</tr> | |
<tr> | |
<td class="tooltip-trigger" data-title="DeepSeek R1" data-tooltip="DeepSeek's premium 671 billion parameter Mixture of Experts (MoE) model representing their most advanced offering. Designed for state-of-the-art performance across complex reasoning and financial tasks.">DeepSeek R1</td> | |
<td class="has-text-centered performance-best">0.813</td> | |
<td class="has-text-centered performance-best">0.805</td> | |
<td class="has-text-centered performance-best">0.807</td> | |
<td class="has-text-centered performance-strong">0.944</td> | |
<td class="has-text-centered performance-medium">0.412</td> | |
<td class="has-text-centered">0.424</td> | |
<td class="has-text-centered performance-medium">0.412</td> | |
<td class="has-text-centered">0.393</td> | |
<td class="has-text-centered performance-best">0.946</td> | |
<td class="has-text-centered performance-best">0.960</td> | |
<td class="has-text-centered performance-best">0.946</td> | |
<td class="has-text-centered performance-best">0.952</td> | |
<td class="has-text-centered performance-best">0.044</td> | |
<td class="has-text-centered performance-best">0.082</td> | |
<td class="has-text-centered performance-best">0.057</td> | |
<td class="has-text-centered performance-best">0.029</td> | |
<td class="has-text-centered performance-medium">0.600</td> | |
<td class="has-text-centered performance-medium">0.586</td> | |
<td class="has-text-centered performance-medium">0.586</td> | |
<td class="has-text-centered performance-medium">0.587</td> | |
</tr> | |
<tr> | |
<td class="tooltip-trigger" data-title="QwQ-32B-Preview" data-tooltip="Qwen's experimental 32 billion parameter MoE model focused on efficient computation. Features interesting performance characteristics on certain financial tasks.">QwQ-32B-Preview</td> | |
<td class="has-text-centered">0.695</td> | |
<td class="has-text-centered">0.681</td> | |
<td class="has-text-centered">0.685</td> | |
<td class="has-text-centered">0.907</td> | |
<td class="has-text-centered">0.278</td> | |
<td class="has-text-centered">0.396</td> | |
<td class="has-text-centered">0.278</td> | |
<td class="has-text-centered">0.270</td> | |
<td class="has-text-centered">0.680</td> | |
<td class="has-text-centered">0.770</td> | |
<td class="has-text-centered">0.680</td> | |
<td class="has-text-centered">0.656</td> | |
<td class="has-text-centered">0.001</td> | |
<td class="has-text-centered">0.001</td> | |
<td class="has-text-centered">0.001</td> | |
<td class="has-text-centered">0.000</td> | |
<td class="has-text-centered">0.005</td> | |
<td class="has-text-centered">0.005</td> | |
<td class="has-text-centered">0.005</td> | |
<td class="has-text-centered">0.005</td> | |
</tr> | |
<tr> | |
<td class="tooltip-trigger" data-title="Jamba 1.5 Mini" data-tooltip="A compact variant in the Jamba model series focused on efficiency. Balances performance and computational requirements for financial tasks.">Jamba 1.5 Mini</td> | |
<td class="has-text-centered">0.564</td> | |
<td class="has-text-centered">0.556</td> | |
<td class="has-text-centered">0.552</td> | |
<td class="has-text-centered">0.818</td> | |
<td class="has-text-centered">0.308</td> | |
<td class="has-text-centered">0.450</td> | |
<td class="has-text-centered">0.308</td> | |
<td class="has-text-centered">0.284</td> | |
<td class="has-text-centered">0.830</td> | |
<td class="has-text-centered">0.864</td> | |
<td class="has-text-centered">0.830</td> | |
<td class="has-text-centered">0.844</td> | |
<td class="has-text-centered">0.004</td> | |
<td class="has-text-centered">0.006</td> | |
<td class="has-text-centered">0.005</td> | |
<td class="has-text-centered">0.003</td> | |
<td class="has-text-centered">0.119</td> | |
<td class="has-text-centered">0.182</td> | |
<td class="has-text-centered">0.182</td> | |
<td class="has-text-centered">0.132</td> | |
</tr> | |
<tr> | |
<td class="tooltip-trigger" data-title="Jamba 1.5 Large" data-tooltip="An expanded variant in the Jamba model series with enhanced capabilities. Features stronger reasoning for financial tasks than its smaller counterpart.">Jamba 1.5 Large</td> | |
<td class="has-text-centered">0.707</td> | |
<td class="has-text-centered">0.687</td> | |
<td class="has-text-centered">0.693</td> | |
<td class="has-text-centered">0.883</td> | |
<td class="has-text-centered">0.341</td> | |
<td class="has-text-centered">0.452</td> | |
<td class="has-text-centered">0.341</td> | |
<td class="has-text-centered">0.341</td> | |
<td class="has-text-centered">0.856</td> | |
<td class="has-text-centered">0.890</td> | |
<td class="has-text-centered">0.856</td> | |
<td class="has-text-centered">0.862</td> | |
<td class="has-text-centered">0.004</td> | |
<td class="has-text-centered">0.005</td> | |
<td class="has-text-centered">0.005</td> | |
<td class="has-text-centered">0.002</td> | |
<td class="has-text-centered">0.403</td> | |
<td class="has-text-centered">0.414</td> | |
<td class="has-text-centered">0.414</td> | |
<td class="has-text-centered">0.397</td> | |
</tr> | |
<tr> | |
<td class="tooltip-trigger" data-title="Claude 3.5 Sonnet" data-tooltip="Anthropic's advanced proprietary language model optimized for complex reasoning and instruction-following. Features enhanced performance on financial tasks with strong text processing capabilities.">Claude 3.5 Sonnet</td> | |
<td class="has-text-centered performance-strong">0.811</td> | |
<td class="has-text-centered performance-strong">0.794</td> | |
<td class="has-text-centered performance-strong">0.799</td> | |
<td class="has-text-centered">0.922</td> | |
<td class="has-text-centered performance-best">0.455</td> | |
<td class="has-text-centered performance-best">0.465</td> | |
<td class="has-text-centered performance-best">0.455</td> | |
<td class="has-text-centered performance-best">0.439</td> | |
<td class="has-text-centered">0.873</td> | |
<td class="has-text-centered">0.927</td> | |
<td class="has-text-centered">0.873</td> | |
<td class="has-text-centered">0.891</td> | |
<td class="has-text-centered performance-strong">0.034</td> | |
<td class="has-text-centered performance-strong">0.080</td> | |
<td class="has-text-centered performance-strong">0.047</td> | |
<td class="has-text-centered performance-strong">0.024</td> | |
<td class="has-text-centered performance-strong">0.658</td> | |
<td class="has-text-centered performance-strong">0.668</td> | |
<td class="has-text-centered performance-strong">0.668</td> | |
<td class="has-text-centered performance-strong">0.655</td> | |
</tr> | |
<tr> | |
<td class="tooltip-trigger" data-title="Claude 3 Haiku" data-tooltip="Anthropic's smaller efficiency-focused model in the Claude family. Designed for speed and lower computational requirements while maintaining reasonable performance on financial tasks.">Claude 3 Haiku</td> | |
<td class="has-text-centered">0.732</td> | |
<td class="has-text-centered">0.700</td> | |
<td class="has-text-centered">0.711</td> | |
<td class="has-text-centered">0.895</td> | |
<td class="has-text-centered">0.294</td> | |
<td class="has-text-centered">0.330</td> | |
<td class="has-text-centered">0.294</td> | |
<td class="has-text-centered">0.285</td> | |
<td class="has-text-centered">0.879</td> | |
<td class="has-text-centered">0.917</td> | |
<td class="has-text-centered">0.879</td> | |
<td class="has-text-centered">0.883</td> | |
<td class="has-text-centered">0.011</td> | |
<td class="has-text-centered">0.022</td> | |
<td class="has-text-centered">0.015</td> | |
<td class="has-text-centered">0.008</td> | |
<td class="has-text-centered">0.498</td> | |
<td class="has-text-centered">0.517</td> | |
<td class="has-text-centered">0.517</td> | |
<td class="has-text-centered">0.494</td> | |
</tr> | |
<tr>
<td class="tooltip-trigger" data-title="Cohere Command R +" data-tooltip="Cohere's enhanced command model with improved instruction-following capabilities. Features advanced reasoning for financial domains with stronger performance than its smaller counterpart.">Cohere Command R +</td>
<td class="has-text-centered">0.769</td>
<td class="has-text-centered">0.750</td>
<td class="has-text-centered">0.756</td>
<td class="has-text-centered">0.902</td>
<td class="has-text-centered">0.353</td>
<td class="has-text-centered">0.405</td>
<td class="has-text-centered">0.353</td>
<td class="has-text-centered">0.333</td>
<td class="has-text-centered">0.917</td>
<td class="has-text-centered">0.930</td>
<td class="has-text-centered">0.917</td>
<td class="has-text-centered">0.922</td>
<td class="has-text-centered">0.016</td>
<td class="has-text-centered">0.032</td>
<td class="has-text-centered">0.021</td>
<td class="has-text-centered">0.011</td>
<td class="has-text-centered">0.462</td>
<td class="has-text-centered">0.459</td>
<td class="has-text-centered">0.459</td>
<td class="has-text-centered">0.452</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="Google Gemini 1.5 Pro" data-tooltip="Google's advanced proprietary multimodal model designed for complex reasoning and instruction-following tasks. Features strong performance across financial domains with advanced reasoning capabilities.">Google Gemini 1.5 Pro</td>
<td class="has-text-centered">0.728</td>
<td class="has-text-centered">0.705</td>
<td class="has-text-centered">0.712</td>
<td class="has-text-centered">0.891</td>
<td class="has-text-centered">0.373</td>
<td class="has-text-centered">0.436</td>
<td class="has-text-centered">0.373</td>
<td class="has-text-centered">0.374</td>
<td class="has-text-centered performance-strong">0.934</td>
<td class="has-text-centered performance-strong">0.955</td>
<td class="has-text-centered performance-strong">0.934</td>
<td class="has-text-centered performance-strong">0.944</td>
<td class="has-text-centered">0.014</td>
<td class="has-text-centered">0.028</td>
<td class="has-text-centered">0.019</td>
<td class="has-text-centered">0.010</td>
<td class="has-text-centered">0.399</td>
<td class="has-text-centered">0.400</td>
<td class="has-text-centered">0.400</td>
<td class="has-text-centered">0.393</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="OpenAI gpt-4o" data-tooltip="OpenAI's flagship multimodal model optimized for a balance of quality and speed. Features strong performance across diverse tasks with capabilities for complex financial reasoning and instruction following.">OpenAI gpt-4o</td>
<td class="has-text-centered">0.778</td>
<td class="has-text-centered">0.760</td>
<td class="has-text-centered">0.766</td>
<td class="has-text-centered">0.911</td>
<td class="has-text-centered">0.402</td>
<td class="has-text-centered">0.445</td>
<td class="has-text-centered">0.402</td>
<td class="has-text-centered">0.399</td>
<td class="has-text-centered performance-medium">0.931</td>
<td class="has-text-centered performance-strong">0.955</td>
<td class="has-text-centered performance-medium">0.931</td>
<td class="has-text-centered performance-medium">0.942</td>
<td class="has-text-centered performance-medium">0.027</td>
<td class="has-text-centered">0.056</td>
<td class="has-text-centered">0.037</td>
<td class="has-text-centered">0.019</td>
<td class="has-text-centered">0.537</td>
<td class="has-text-centered">0.517</td>
<td class="has-text-centered">0.517</td>
<td class="has-text-centered">0.523</td>
</tr>
<tr>
<td class="tooltip-trigger" data-title="OpenAI o1-mini" data-tooltip="OpenAI's smaller advanced model balancing efficiency and performance. Demonstrates surprisingly strong results on financial tasks despite its reduced parameter count.">OpenAI o1-mini</td>
<td class="has-text-centered">0.772</td>
<td class="has-text-centered">0.755</td>
<td class="has-text-centered">0.761</td>
<td class="has-text-centered">0.922</td>
<td class="has-text-centered">0.407</td>
<td class="has-text-centered">0.444</td>
<td class="has-text-centered">0.407</td>
<td class="has-text-centered performance-medium">0.403</td>
<td class="has-text-centered">0.867</td>
<td class="has-text-centered">0.900</td>
<td class="has-text-centered">0.867</td>
<td class="has-text-centered">0.876</td>
<td class="has-text-centered">0.007</td>
<td class="has-text-centered">0.015</td>
<td class="has-text-centered">0.010</td>
<td class="has-text-centered">0.005</td>
<td class="has-text-centered performance-best">0.661</td>
<td class="has-text-centered performance-best">0.681</td>
<td class="has-text-centered performance-best">0.681</td>
<td class="has-text-centered performance-best">0.662</td>
</tr>
</tbody>
</table>
<div class="content is-small mt-4"> | |
<p><strong>Note:</strong> Color highlighting indicates performance ranking: | |
<span class="performance-best"> Best </span>, | |
<span class="performance-medium"> Strong </span>, | |
<span class="performance-low"> Good </span> | |
</p> | |
</div> | |
</div>
</div>
<script src="static/js/tooltips.js"></script>
<script src="static/js/fixed-tooltips.js"></script>