"""Evaluate multiple-choice audio benchmark predictions.

Each sample in the input JSON is expected to carry 'answer', 'choices',
'modality', 'category', an optional 'sub-category', and the model output
under 'model_prediction'. Accuracy is reported overall and broken down by
modality, category, and sub-category.
"""
import argparse
import json
import re

from tqdm import tqdm

def string_match(answer, prediction, choices):
    """Return True if `prediction` matches `answer` among `choices`.

    A prediction counts as correct when it contains every token of the
    answer and none of the tokens that appear only in the other choices.
    """

    def tokenize(text):
        # Lowercase and split into word tokens.
        return set(re.findall(r'\b\w+\b', text.lower()))

    prediction_tokens = tokenize(prediction)
    answer_tokens = tokenize(answer)

    if not prediction_tokens:
        return False

    # Collect tokens that belong to incorrect choices but not to the answer.
    incorrect_tokens = set()
    for choice in choices:
        choice_tokens = tokenize(choice)
        if choice_tokens != answer_tokens:
            incorrect_tokens.update(choice_tokens - answer_tokens)

    # The prediction must cover the full answer ...
    cond1 = answer_tokens.issubset(prediction_tokens)
    # ... and must not mention any token unique to a wrong choice.
    cond2 = prediction_tokens.isdisjoint(incorrect_tokens)

    return cond1 and cond2
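
# Example (hypothetical values):
#   string_match("piano", "The instrument is a piano.", ["piano", "violin", "drums"])
# returns True, since the prediction contains the answer token "piano" and
# neither of the tokens unique to the wrong choices ("violin", "drums").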


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process benchmark JSON and calculate accuracy.")
    parser.add_argument('--input', type=str, required=True, help='Path to input JSON file to be evaluated')
    args = parser.parse_args()

    with open(args.input, 'r') as f:
        input_data = json.load(f)

    corr, total = 0, 0

    # Each value is a [n_correct, n_total] pair.
    modality_metrics = {
        'sound': [0, 0], 'music': [0, 0], 'speech': [0, 0],
        'mix-sound-music': [0, 0], 'mix-sound-speech': [0, 0],
        'mix-music-speech': [0, 0], 'mix-sound-music-speech': [0, 0],
    }
    category_metrics = {
        'Signal Layer': [0, 0], 'Perception Layer': [0, 0],
        'Semantic Layer': [0, 0], 'Cultural Layer': [0, 0],
    }
    # Sub-categories are not known up front; they are registered on first sight.
    subcat_metrics = {}

    output_key = 'model_prediction'
    no_pred_count = 0
    matched_outputs = []  # [answer, prediction] pairs of correct samples
    new_data = []         # input samples annotated with a 'match' flag

    for sample in tqdm(input_data):
        # A missing prediction is scored as incorrect (empty string) rather
        # than skipped, so it still counts toward the totals. The original
        # early `continue` made this branch unreachable and left
        # `no_pred_count` permanently at zero.
        if output_key not in sample:
            _prediction = ''
            no_pred_count += 1
        else:
            _prediction = sample[output_key]

        _answer = sample['answer']
        modality = sample['modality']
        category = sample['category']
        choices = sample['choices']

        subcat = sample.get('sub-category', None)
        if subcat is not None and subcat not in subcat_metrics:
            subcat_metrics[subcat] = [0, 0]

        match_result = string_match(_answer, _prediction, choices)

        if match_result:
            modality_metrics[modality][0] += 1
            category_metrics[category][0] += 1
            if subcat is not None:
                subcat_metrics[subcat][0] += 1
            matched_outputs.append([_answer, _prediction])
            corr += 1
            sample['match'] = 1
        else:
            sample['match'] = 0

        total += 1
        new_data.append(sample)
        modality_metrics[modality][1] += 1
        category_metrics[category][1] += 1
        if subcat is not None:
            subcat_metrics[subcat][1] += 1

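    # Note: 'matched_outputs' and 'new_data' are collected above but never
    # written out; dump them here (e.g., with json.dump) if you need the
    # per-sample 'match' annotations.
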
    print("*" * 30)
    print("Modality-wise Accuracy:")
    for modality, (n_correct, n_total) in modality_metrics.items():
        acc = (n_correct / n_total) * 100 if n_total > 0 else 0
        print(f"{modality} : {acc:.2f}% over {n_total} samples")

    print("*" * 30)
    print("Category-wise Accuracy:")
    for category, (n_correct, n_total) in category_metrics.items():
        acc = (n_correct / n_total) * 100 if n_total > 0 else 0
        print(f"{category} : {acc:.2f}% over {n_total} samples")

    print("*" * 30)
    print("Sub-category-wise Accuracy:")
    for subcat, (n_correct, n_total) in subcat_metrics.items():
        acc = (n_correct / n_total) * 100 if n_total > 0 else 0
        print(f"{subcat} : {acc:.2f}% over {n_total} samples")

    print("*" * 30)
    # Guard against an empty input file.
    total_acc = (corr / total) * 100 if total > 0 else 0
    print(f"Total Accuracy: {total_acc:.2f}% over {total} samples")
    print("*" * 30)
    print(f"No prediction count: {no_pred_count}")
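
# Usage sketch (file names are hypothetical):
#   python evaluate.py --input predictions.json
# where predictions.json is a JSON list of samples, e.g.
#   [{"answer": "piano", "choices": ["piano", "violin"], "modality": "music",
#     "category": "Perception Layer", "sub-category": "...",
#     "model_prediction": "It is a piano."}]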