|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Dataset for the Legal Criticality Prediction task.""" |
|
|
|
import json |
|
import lzma |
|
import os |
|
|
|
import datasets |
|
try: |
|
import lzma as xz |
|
except ImportError: |
|
import pylzma as xz |
|
|
|
|
|
|
|
|
|
# NOTE(review): placeholder citation left over from the Hugging Face
# dataset-script template — replace with the dataset's actual paper citation.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# Short human-readable summary surfaced on the dataset card / DatasetInfo.
_DESCRIPTION = """\
This dataset contains Swiss federal court decisions for the legal criticality prediction task
"""

# Base URL of the hosted data directory, keyed by builder-config name.
# Split files (train/validation/test .jsonl.xz) are joined onto this path.
_URLS = {
    "full": "https://huggingface.co/datasets/rcds/swiss_criticality_prediction/resolve/main/data",
}
|
|
|
|
|
class SwissCriticalityPrediction(datasets.GeneratorBasedBuilder):
    """Builder for the Swiss legal criticality prediction dataset.

    Downloads xz-compressed JSON-lines files (one per split) containing
    Swiss federal court decisions and yields one example per decision.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", description="This part covers the whole dataset"),
    ]

    DEFAULT_CONFIG_NAME = "full"

    # Single source of truth for the record fields, used both to declare the
    # feature schema in _info() and to copy values in _generate_examples().
    # (The original duplicated this list in both places, with inconsistent
    # ordering of bge_label/citation_label.)
    _FEATURE_KEYS = (
        "decision_id",
        "language",
        "year",
        "chamber",
        "region",
        "origin_chamber",
        "origin_court",
        "origin_canton",
        "law_area",
        "law_sub_area",
        "bge_label",
        "citation_label",
        "facts",
        "considerations",
        "rulings",
    )

    def _info(self):
        """Return dataset metadata: description, citation and feature schema."""
        if self.config.name == "full":
            features = datasets.Features(
                {
                    # Every field is a string except the decision year.
                    key: datasets.Value("int32" if key == "year" else "string")
                    for key in self._FEATURE_KEYS
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Was defined at module level but never wired in previously.
            citation=_CITATION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Download the three split archives and map each to a generator.

        Args:
            dl_manager: datasets.DownloadManager used to fetch remote files.

        Returns:
            list[datasets.SplitGenerator] for train / validation / test.
        """
        urls = _URLS[self.config.name]
        filepath_train = dl_manager.download(os.path.join(urls, "train.jsonl.xz"))
        filepath_validation = dl_manager.download(os.path.join(urls, "validation.jsonl.xz"))
        filepath_test = dl_manager.download(os.path.join(urls, "test.jsonl.xz"))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath_train,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": filepath_validation,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": filepath_test,
                    "split": "test"
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from an xz-compressed JSON-lines file.

        A corrupt archive is logged; the error is re-raised only when not a
        single line could be read, so a partially readable file still yields
        whatever decompressed cleanly (preserves the original best-effort
        behavior).

        Args:
            filepath: local path to the downloaded ``*.jsonl.xz`` file.
            split: split name, used only for the error log message.
        """
        line_counter = 0
        try:
            # lzma.open manages the underlying file handle itself. The
            # original wrapped a manually opened binary handle that
            # LZMAFile.close() does not close, leaking it on error paths.
            with lzma.open(filepath, "rt", encoding="utf-8") as f:
                # `idx` instead of `id` — avoids shadowing the builtin.
                for idx, line in enumerate(f):
                    line_counter += 1
                    if line:
                        data = json.loads(line)
                        if self.config.name == "full":
                            yield idx, {key: data[key] for key in self._FEATURE_KEYS}
        except lzma.LZMAError as e:
            print(split, e)
            if line_counter == 0:
                # Bare `raise` keeps the original traceback; `raise e`
                # re-raised from here and obscured the failure site.
                raise