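"""Custom LightEval task definitions.

This module re-exports LightEval's multilingual TASKS_TABLE and adds English
benchmarks (ARC, HellaSwag, CommonsenseQA, OpenBookQA, WinoGrande, PIQA, MMLU,
MMLU-Pro, GSM8K, GPQA, MATH, BIG-Bench Hard), each pinned to a specific dataset
revision.
"""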
import random
from functools import partial

import numpy as np

from lighteval.tasks.lighteval_task import LightevalTaskConfig
from lighteval.tasks.requests import Doc
from lighteval.metrics.metrics import Metrics, SampleLevelMetric, MetricCategory, MetricUseCase, ExactMatches
from lighteval.metrics.dynamic_metrics import (
    loglikelihood_acc_metric,
    multilingual_quasi_exact_match_metric,
    multilingual_quasi_f1_score_metric,
)
from lighteval.metrics.normalizations import LogProbCharNorm, LogProbPMINorm, LogProbTokenNorm
from lighteval.tasks.default_prompts import LETTER_INDICES
import lighteval.tasks.default_prompts as prompt
from lighteval.tasks.multilingual.adapters import (
    agieval_adapter,
    alghafa_adapter,
    ceval_adapter,
    get_m3exam_adapter,
    get_mkqa_adapter,
    sciqa_adapter,
    thai_exams_adapter,
    winogrand_adapter,
    xcodah_adapter,
)
from lighteval.tasks.multilingual.utils.task_utils import get_metrics_for_formulation, normalize_subset
from lighteval.tasks.templates.boolq import get_boolq_prompt_function
from lighteval.tasks.templates.continuation import get_continuation_prompt_function
from lighteval.tasks.templates.copa import get_copa_prompt_function
from lighteval.tasks.templates.hellaswag import get_hellaswag_prompt_function
from lighteval.tasks.templates.multichoice import get_mcq_prompt_function
from lighteval.tasks.templates.nli import get_nli_prompt_function
from lighteval.tasks.templates.qa import get_qa_prompt_function
from lighteval.tasks.templates.utils.formulation import (
    CFFormulation,
    HybridFormulation,
    MCFFormulation,
)
from lighteval.utils.language import Language

from lighteval.tasks.multilingual.tasks import TASKS_TABLE as ML_TASKS_TABLE

from .math_utils import parse_math_answer

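# Master task table for this module: start from LightEval's built-in multilingual
# TASKS_TABLE and extend it with the English-language tasks defined below.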
TASKS_TABLE = []
TASKS_TABLE.extend(ML_TASKS_TABLE)

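# Prompt for BIG-Bench Hard: plain "Question: ... / Answer:" format with the
# dataset's target string as the single gold choice (scored by exact match below).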
def bbh_prompt(line, task_name: str = None):
    return Doc(
        task_name=task_name,
        query="Question: " + line["input"] + "\nAnswer: ",
        choices=[line["target"]],
        gold_index=0,
    )

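# Prompt for MATH-style problems: asks for step-by-step reasoning with the final
# answer in \boxed{}, and uses the reference solution as the gold choice.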
def prompt_math(line, task_name: str = None):
    return Doc(
        task_name=task_name,
        query=f"{line['problem']}\nPlease reason step by step, and put your final answer within \\boxed{{}}.\n\n",
        gold_index=0,
        choices=[f"{line['solution']}\n\n"],
    )

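# Prompt for GPQA: formats the question with four lettered options, inserting the
# correct answer at a random position (random.randint is unseeded here, so the gold
# position varies between runs unless the caller fixes the RNG seed).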
def gpqa(line, task_name: str = None):
    GPQA_QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering.

{Question}

A) {A}
B) {B}
C) {C}
D) {D}
""".strip()
    gold_index = random.randint(0, 3)
    choices = [line["Incorrect Answer 1"], line["Incorrect Answer 2"], line["Incorrect Answer 3"]]
    choices.insert(gold_index, line["Correct Answer"])

    query = GPQA_QUERY_TEMPLATE.format(
        A=choices[0], B=choices[1], C=choices[2], D=choices[3], Question=line["Question"]
    )

    return Doc(
        task_name=task_name,
        query=query,
        choices=LETTER_INDICES[: len(choices)],
        gold_index=gold_index,
        instruction=query,
    )

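# ARC (AI2 Reasoning Challenge): grade-school science multiple-choice questions,
# Easy and Challenge subsets, evaluated under MCF, CF and hybrid formulations with
# several log-likelihood normalizations.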
arc_tasks = [
    LightevalTaskConfig(
        name=f"arc_{formulation.name.lower()}:{subset.lower()}",
        prompt_function=get_mcq_prompt_function(
            Language.ENGLISH,
            lambda line: {
                "question": line["question"],
                "choices": line["choices"]["text"],
                "gold_idx": int(line["answerKey"]) - 1
                if line["answerKey"].isdigit()
                else LETTER_INDICES.index(line["answerKey"]),
            },
            formulation=formulation,
        ),
        suite=("custom",),
        hf_repo="allenai/ai2_arc",
        hf_subset=f"ARC-{subset}",
        hf_revision="210d026faf9955653af8916fad021475a3f00453",
        trust_dataset=True,
        evaluation_splits=("test",),
        few_shots_split="train",
        metric=get_metrics_for_formulation(
            formulation,
            [
                loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
                loglikelihood_acc_metric(normalization=LogProbCharNorm()),
                loglikelihood_acc_metric(normalization=LogProbPMINorm()),
            ],
        ),
    )
    for subset in ["Easy", "Challenge"]
    for formulation in [
        MCFFormulation(),
        CFFormulation(),
        HybridFormulation(),
    ]
]

TASKS_TABLE.extend(arc_tasks)

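# HellaSwag: commonsense sentence completion; the model scores the four candidate
# continuations of a short context.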
hellaswag_tasks = [
    LightevalTaskConfig(
        name=f"hellaswag_{formulation.name.lower()}",
        suite=["custom"],
        prompt_function=get_hellaswag_prompt_function(
            language=Language.ENGLISH,
            adapter=lambda line: {
                "activity_label": line["activity_label"],
                "ctx_a": line["ctx_a"],
                "ctx_b": line["ctx_b"],
                "continuations": line["endings"],
                "gold_idx": int(line["label"]),
            },
            formulation=formulation,
        ),
        hf_repo="Rowan/hellaswag",
        hf_subset="default",
        hf_revision="6002345709e0801764318f06bf06ce1e7d1a1fe3",
        evaluation_splits=["validation"],
        hf_avail_splits=["validation"],
        metric=get_metrics_for_formulation(
            formulation,
            [
                loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
                loglikelihood_acc_metric(normalization=LogProbCharNorm()),
            ],
        ),
        trust_dataset=True,
    )
    for formulation in [MCFFormulation(), CFFormulation(), HybridFormulation()]
]

TASKS_TABLE.extend(hellaswag_tasks)

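# CommonsenseQA: 5-way multiple-choice commonsense questions; the gold index is
# recovered from the answer-key letter via the choice labels.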
commonsense_qa_tasks = [
    LightevalTaskConfig(
        name=f"commonsenseqa_{formulation.name.lower()}",
        prompt_function=get_mcq_prompt_function(
            Language.ENGLISH,
            lambda line: {
                "question": line["question"],
                "choices": line["choices"]["text"],
                "gold_idx": line["choices"]["label"].index(line["answerKey"].strip()),
            },
            formulation=formulation,
        ),
        suite=("custom",),
        hf_repo="tau/commonsense_qa",
        hf_subset="default",
        hf_revision="94630fe30dad47192a8546eb75f094926d47e155",
        metric=get_metrics_for_formulation(
            formulation,
            [
                loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
                loglikelihood_acc_metric(normalization=LogProbCharNorm()),
                loglikelihood_acc_metric(normalization=LogProbPMINorm()),
            ],
        ),
    )
    for formulation in [
        MCFFormulation(),
        CFFormulation(),
        HybridFormulation(),
    ]
]

TASKS_TABLE.extend(commonsense_qa_tasks)

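# OpenBookQA: elementary-science multiple-choice questions; the gold index is
# looked up from the answer-key letter.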
openbook_qa_tasks = [
    LightevalTaskConfig(
        name=f"openbookqa_{formulation.name.lower()}",
        prompt_function=get_mcq_prompt_function(
            Language.ENGLISH,
            lambda line: {
                "question": line["question_stem"],
                "choices": line["choices"]["text"],
                "gold_idx": LETTER_INDICES.index(line["answerKey"]),
            },
            formulation=formulation,
        ),
        suite=["custom"],
        hf_repo="allenai/openbookqa",
        hf_subset="main",
        hf_revision="388097ea7776314e93a529163e0fea805b8a6454",
        metric=get_metrics_for_formulation(
            formulation,
            [
                loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
                loglikelihood_acc_metric(normalization=LogProbCharNorm()),
            ],
        ),
    )
    for formulation in [
        MCFFormulation(),
        CFFormulation(),
        HybridFormulation(),
    ]
]

TASKS_TABLE.extend(openbook_qa_tasks)

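# WinoGrande: pronoun-resolution sentence pairs, framed as a continuation task via
# lighteval's winogrand_adapter.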
winogrande_tasks = [
    LightevalTaskConfig(
        name=f"winogrande_{formulation.name.lower()}",
        suite=("custom",),
        prompt_function=get_continuation_prompt_function(
            Language.ENGLISH, partial(winogrand_adapter, Language.ENGLISH), formulation=formulation
        ),
        hf_repo="allenai/winogrande",
        hf_subset="winogrande_xl",
        trust_dataset=True,
        hf_revision="85ac5b5a3b7a930e22d590176e39460400d19e41",
        metric=[
            loglikelihood_acc_metric(normalization=None),
            loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
            loglikelihood_acc_metric(normalization=LogProbCharNorm()),
        ],
    )
    for formulation in [
        MCFFormulation(),
        CFFormulation(),
        HybridFormulation(),
    ]
]

TASKS_TABLE.extend(winogrande_tasks)

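# PIQA: physical-commonsense questions with two candidate solutions per goal.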
piqa_tasks = [
    LightevalTaskConfig(
        name=f"piqa_{formulation.name.lower()}",
        prompt_function=get_mcq_prompt_function(
            Language.ENGLISH,
            lambda line: {
                "question": line["goal"],
                "choices": [line["sol1"], line["sol2"]],
                "gold_idx": int(line["label"]),
            },
            formulation=formulation,
        ),
        suite=["custom"],
        hf_repo="ybisk/piqa",
        hf_revision="2e8ac2dffd59bac8c3c6714948f4c551a0848bb0",
        hf_subset="plain_text",
        trust_dataset=True,
        metric=get_metrics_for_formulation(
            formulation,
            [
                loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
                loglikelihood_acc_metric(normalization=LogProbCharNorm()),
            ],
        ),
    )
    for formulation in [
        MCFFormulation(),
        CFFormulation(),
        HybridFormulation(),
    ]
]

TASKS_TABLE.extend(piqa_tasks)

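# MMLU: 57-subject multiple-choice knowledge benchmark; one task per subject and
# formulation, with few-shot examples drawn from the dev split.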
MMLU_SUBSETS = [
    "abstract_algebra", "anatomy", "astronomy", "business_ethics", "clinical_knowledge",
    "college_biology", "college_chemistry", "college_computer_science", "college_mathematics",
    "college_medicine", "college_physics", "computer_security", "conceptual_physics",
    "econometrics", "electrical_engineering", "elementary_mathematics", "formal_logic",
    "global_facts", "high_school_biology", "high_school_chemistry", "high_school_computer_science",
    "high_school_european_history", "high_school_geography", "high_school_government_and_politics",
    "high_school_macroeconomics", "high_school_mathematics", "high_school_microeconomics",
    "high_school_physics", "high_school_psychology", "high_school_statistics",
    "high_school_us_history", "high_school_world_history", "human_aging", "human_sexuality",
    "international_law", "jurisprudence", "logical_fallacies", "machine_learning", "management",
    "marketing", "medical_genetics", "miscellaneous", "moral_disputes", "moral_scenarios",
    "nutrition", "philosophy", "prehistory", "professional_accounting", "professional_law",
    "professional_medicine", "professional_psychology", "public_relations", "security_studies",
    "sociology", "us_foreign_policy", "virology", "world_religions",
]

mmlu_tasks = [
    LightevalTaskConfig(
        name=f"mmlu_{formulation.name.lower()}:{subset}",
        prompt_function=get_mcq_prompt_function(
            Language.ENGLISH,
            lambda line: {
                "question": line["question"],
                "choices": line["choices"],
                "gold_idx": int(line["answer"]),
            },
            formulation=formulation,
        ),
        suite=("custom",),
        hf_repo="cais/mmlu",
        hf_subset=subset,
        hf_revision="c30699e8356da336a370243923dbaf21066bb9fe",
        trust_dataset=True,
        evaluation_splits=("test",),
        few_shots_split="dev",
        metric=get_metrics_for_formulation(
            formulation,
            [
                loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
                loglikelihood_acc_metric(normalization=LogProbCharNorm()),
                loglikelihood_acc_metric(normalization=LogProbPMINorm()),
            ],
        ),
    )
    for subset in MMLU_SUBSETS
    for formulation in [
        MCFFormulation(),
        CFFormulation(),
        HybridFormulation(),
    ]
]

TASKS_TABLE.extend(mmlu_tasks)

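# MMLU-Pro: a harder MMLU variant with up to ten answer options per question.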
mmlu_pro_tasks = [
    LightevalTaskConfig(
        name=f"mmlu_pro_{formulation.name.lower()}",
        prompt_function=get_mcq_prompt_function(
            Language.ENGLISH,
            lambda line: {
                "question": line["question"],
                "choices": line["options"],
                "gold_idx": line["answer_index"],
            },
            formulation=formulation,
        ),
        suite=("custom",),
        hf_repo="TIGER-Lab/MMLU-Pro",
        hf_subset="default",
        hf_revision="3373e0b32277875b8db2aa555a333b78a08477ea",
        trust_dataset=True,
        evaluation_splits=("test",),
        few_shots_split="validation",
        metric=get_metrics_for_formulation(
            formulation,
            [
                loglikelihood_acc_metric(normalization=LogProbTokenNorm()),
                loglikelihood_acc_metric(normalization=LogProbCharNorm()),
                loglikelihood_acc_metric(normalization=LogProbPMINorm()),
            ],
        ),
    )
    for formulation in [
        MCFFormulation(),
        CFFormulation(),
        HybridFormulation(),
    ]
]

TASKS_TABLE.extend(mmlu_pro_tasks)

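# GSM8K: grade-school math word problems, scored generatively with lighteval's
# GSM8K quasi-exact-match metric; few-shot examples are sampled from the train split.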
gsm8k_tasks = [
    LightevalTaskConfig(
        name="gsm8k",
        prompt_function=prompt.gsm8k,
        suite=("custom",),
        hf_repo="openai/gsm8k",
        hf_subset="main",
        hf_revision="e53f048856ff4f594e959d75785d2c2d37b678ee",
        hf_avail_splits=["train", "test"],
        evaluation_splits=["test"],
        metric=[Metrics.quasi_exact_match_gsm8k],
        generation_size=256,
        stop_sequence=["Question:", "Question"],
        few_shots_select="random_sampling_from_train",
    )
]

TASKS_TABLE.extend(gsm8k_tasks)

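# Sample-level metric for MATH: exact match after both the prediction and the gold
# solution are normalized with parse_math_answer (expected to extract the final
# answer, e.g. from \boxed{}).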
quasi_exact_match_math = SampleLevelMetric(
    metric_name="qem",
    sample_level_fn=ExactMatches(
        strip_strings=True,
        normalize_pred=lambda text: parse_math_answer(text, "math"),
        normalize_gold=lambda text: parse_math_answer(text, "math"),
    ).compute,
    category=MetricCategory.GENERATIVE,
    use_case=MetricUseCase.MATH,
    corpus_level_fn=np.mean,
    higher_is_better=True,
)

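# GPQA (graduate-level, "Google-proof" QA): single-token multiple choice over the
# shuffled A-D options built by the gpqa prompt above. Note that this task is
# registered under the "lighteval" suite, unlike the other tasks here, which use "custom".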
GPQA_TASKS = [
    LightevalTaskConfig(
        name="gpqa",
        suite=["lighteval"],
        prompt_function=gpqa,
        hf_repo="Idavidrein/gpqa",
        hf_subset="gpqa_main",
        hf_avail_splits=["train"],
        evaluation_splits=["train"],
        few_shots_split=None,
        few_shots_select="random_sampling",
        generation_size=1,
        metric=[Metrics.loglikelihood_acc_single_token],
        stop_sequence=["\n"],
        trust_dataset=True,
        version=0,
    )
]

TASKS_TABLE.extend(GPQA_TASKS)

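# MATH: competition-style math problems (hosted in HuggingFaceTB/math_tasks), scored
# with the quasi-exact-match metric defined above; few-shot examples come from the
# "demo" split.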
MATH_TASKS = [
    LightevalTaskConfig(
        name="math",
        prompt_function=prompt_math,
        suite=["custom"],
        hf_repo="HuggingFaceTB/math_tasks",
        hf_subset="math",
        hf_revision="3d34f1076f279000b9315583dcdacfd288898283",
        hf_avail_splits=["train", "test", "demo"],
        evaluation_splits=["test"],
        metric=[quasi_exact_match_math],
        generation_size=1024,
        stop_sequence=["\n\n"],
        few_shots_split="demo",
        few_shots_select="sequential",
        trust_dataset=True,
    )
]

TASKS_TABLE.extend(MATH_TASKS)

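# BIG-Bench Hard (BBH): 27 challenging BIG-Bench subsets, evaluated generatively
# with exact match against the target string.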
BBH_TASKS = [
    LightevalTaskConfig(
        name=f"bbh:{subset}",
        prompt_function=bbh_prompt,
        suite=["custom"],
        hf_repo="lighteval/big_bench_hard",
        hf_subset=subset,
        hf_revision="80610173426f05e6f1448f047e2db4840a7dd899",
        metric=[Metrics.exact_match],
        hf_avail_splits=["train"],
        evaluation_splits=["train"],
        few_shots_split="train",
        trust_dataset=True,
        stop_sequence=["Question:", "Question"],
    )
    for subset in [
        "boolean_expressions",
        "causal_judgement",
        "date_understanding",
        "disambiguation_qa",
        "dyck_languages",
        "formal_fallacies",
        "geometric_shapes",
        "hyperbaton",
        "logical_deduction_five_objects",
        "logical_deduction_seven_objects",
        "logical_deduction_three_objects",
        "movie_recommendation",
        "multistep_arithmetic_two",
        "navigate",
        "object_counting",
        "penguins_in_a_table",
        "reasoning_about_colored_objects",
        "ruin_names",
        "salient_translation_error_detection",
        "snarks",
        "sports_understanding",
        "temporal_sequences",
        "tracking_shuffled_objects_five_objects",
        "tracking_shuffled_objects_seven_objects",
        "tracking_shuffled_objects_three_objects",
        "web_of_lies",
        "word_sorting",
    ]
]

TASKS_TABLE.extend(BBH_TASKS)

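# Strip PMI-normalized metric variants from every registered task (they require
# extra unconditioned log-likelihood requests), keeping only the standard
# multichoice metrics.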
for task in TASKS_TABLE:
    task.metric = [metric for metric in task.metric if metric.category != MetricCategory.MULTICHOICE_PMI]

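# Quick sanity check when run directly. To use these tasks, this file is meant to be
# loaded as a LightEval custom-tasks module (e.g. something along the lines of
# `lighteval accelerate ... --custom-tasks path/to/this_file.py`; the exact CLI
# invocation may differ between lighteval versions).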
if __name__ == "__main__":
    print([t.name for t in TASKS_TABLE])
    print(len(TASKS_TABLE))