|
import json |
|
import os |
|
import glob |
|
from pathlib import Path |
|
import datasets |
|
|
|
|
|
# Dataset card description; surfaced through datasets.DatasetInfo in _info().
_DESCRIPTION = """\

LEGAR_BENCH is the first large-scale Korean LCR benchmark, covering 411 diverse crime types in queries over 1.2M legal cases.

"""


# Canonical hosting page for the dataset, reported in the dataset metadata.
_HOMEPAGE = "https://huggingface.co/datasets/Chaeeun-Kim/LEGAR_BENCH"

# License identifier reported in the dataset metadata.
_LICENSE = "Apache 2.0"
|
|
|
class LegarBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for the LEGAR_BENCH dataset variants."""

    def __init__(self, **kwargs):
        """Forward every keyword argument unchanged to ``datasets.BuilderConfig``."""
        super().__init__(**kwargs)
|
|
|
|
|
class LegarBench(datasets.GeneratorBasedBuilder):
    """Dataset builder for LEGAR_BENCH, a Korean legal case retrieval benchmark.

    Three configurations are exposed: ``standard``, ``stricter``, and
    ``stricter_by_difficulty`` (the last one is organized into one
    sub-folder per difficulty level).
    """

    BUILDER_CONFIGS = [
        LegarBenchConfig(
            name="standard",
            description="Standard version of LEGAR BENCH",
        ),
        LegarBenchConfig(
            name="stricter",
            description="Stricter version of LEGAR BENCH",
        ),
        LegarBenchConfig(
            name="stricter_by_difficulty",
            description="Stricter version organized by difficulty",
        ),
    ]

    DEFAULT_CONFIG_NAME = "standard"

    # Maps each config name to the repository sub-directory holding its
    # JSON files. Unknown names fall back to the standard version, matching
    # the behavior of the previous if/elif chains.
    _CONFIG_DIRS = {
        "standard": "Standard_version",
        "stricter": "Stricter_version",
        "stricter_by_difficulty": "Stricter_version_by_difficulty",
    }

    def _info(self):
        """Return the dataset metadata: features schema, description, homepage, license."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("int64"),
                "target_category": datasets.Value("string"),
                "category": datasets.Value("string"),
                "question": datasets.Value("string"),
                "question_id": datasets.Value("string"),
                "answer": datasets.Sequence(datasets.Value("string")),
                "evidence_id": datasets.Sequence(datasets.Value("string")),
                "difficulty": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Resolve the dataset root directory and declare a single TRAIN split.

        Directory selection per config happens in :meth:`_generate_examples`;
        here we only locate the data root.
        """
        try:
            # Downloading the empty relative path resolves to the root of the
            # dataset repository (local cache directory).
            data_dir = dl_manager.download_and_extract("")
        except Exception:
            # Best-effort fallback to a manually downloaded copy, if the
            # download manager provides one; otherwise propagate the error.
            if getattr(dl_manager, "manual_dir", None):
                data_dir = dl_manager.manual_dir
            else:
                raise

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir,
                    "config_name": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, data_dir, config_name):
        """Yield ``(key, example)`` pairs from the JSON files of *config_name*.

        Args:
            data_dir: Root directory produced by ``_split_generators``.
            config_name: One of the names in ``BUILDER_CONFIGS``.

        Raises:
            FileNotFoundError: If no candidate data directory exists.
        """
        subdir = self._CONFIG_DIRS.get(config_name, self._CONFIG_DIRS["standard"])
        search_path = os.path.join(data_dir, subdir)

        if not os.path.exists(search_path):
            # Some snapshots nest the data one level deeper; probe the known
            # alternative roots before giving up.
            for candidate_root in (
                data_dir,
                os.path.join(data_dir, "LEGAR_BENCH"),
                os.path.join(data_dir, "LEGAR-BENCH"),
            ):
                # os.path.basename is portable, unlike splitting on '/'.
                candidate = os.path.join(candidate_root, os.path.basename(search_path))
                if os.path.exists(candidate):
                    search_path = candidate
                    break
            else:
                raise FileNotFoundError(f"Could not find data directory. Tried: {search_path}, data_dir: {data_dir}")

        example_id = 0
        if config_name == "stricter_by_difficulty":
            # One folder per difficulty level; the folder name becomes each
            # example's "difficulty" field.
            for difficulty_folder in sorted(os.listdir(search_path)):
                folder_path = os.path.join(search_path, difficulty_folder)
                if not os.path.isdir(folder_path):
                    continue
                for filename in sorted(os.listdir(folder_path)):
                    if filename.endswith('.json'):
                        filepath = os.path.join(folder_path, filename)
                        for item in self._load_json_file(filepath, difficulty=difficulty_folder):
                            yield example_id, item
                            example_id += 1
        else:
            # Flat layout: all JSON files live directly under search_path.
            for filename in sorted(os.listdir(search_path)):
                if filename.endswith('.json'):
                    filepath = os.path.join(search_path, filename)
                    for item in self._load_json_file(filepath, difficulty=""):
                        yield example_id, item
                        example_id += 1

    def _load_json_file(self, filepath, difficulty=""):
        """Yield processed examples from one JSON file.

        The file may contain either a list of records or a single record;
        both shapes are normalized through :meth:`_process_item`.
        """
        with open(filepath, 'r', encoding='utf-8') as f:
            data = json.load(f)

        if isinstance(data, list):
            for item in data:
                yield self._process_item(item, difficulty)
        else:
            yield self._process_item(data, difficulty)

    def _process_item(self, item, difficulty=""):
        """Normalize one raw record into the features schema declared in ``_info``.

        Dict-valued categories are serialized as JSON; ``None`` sequences
        become empty lists and their elements are coerced to strings.
        """
        category = item.get("category", {})
        if isinstance(category, dict):
            # Keep non-ASCII (Korean) text readable in the serialized form.
            category_str = json.dumps(category, ensure_ascii=False)
        else:
            category_str = str(category)

        answer = item.get("answer", []) or []
        answer_clean = [str(a) if a is not None else "" for a in answer]

        evidence_id = item.get("evidence_id", []) or []
        evidence_id_clean = [str(e) if e is not None else "" for e in evidence_id]

        return {
            "id": int(item.get("id", 0)),
            "target_category": str(item.get("target_category", "")),
            "category": category_str,
            "question": str(item.get("question", "")),
            "question_id": str(item.get("question_id", "")),
            "answer": answer_clean,
            "evidence_id": evidence_id_clean,
            "difficulty": str(difficulty if difficulty else ""),
        }