import json

import datasets


_CITATION = """\
@article{malhas2020ayatec,
author = {Malhas, Rana and Elsayed, Tamer},
title = {AyaTEC: Building a Reusable Verse-Based Test Collection for Arabic Question Answering on the Holy Qur’an},
year = {2020},
issue_date = {November 2020},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {19},
number = {6},
issn = {2375-4699},
url = {https://doi.org/10.1145/3400396},
doi = {10.1145/3400396},
journal = {ACM Trans. Asian Low-Resour. Lang. Inf. Process.},
month = {oct},
articleno = {78},
numpages = {21},
keywords = {evaluation, Classical Arabic}
}
"""

_DESCRIPTION = """\
The absence of publicly available reusable test collections for Arabic question answering on the Holy Qur’an has \
impeded the possibility of fairly comparing the performance of systems in that domain. In this article, we introduce \
AyaTEC, a reusable test collection for verse-based question answering on the Holy Qur’an, which serves as a common \
experimental testbed for this task. AyaTEC includes 207 questions (with their corresponding 1,762 answers) covering 11 \
topic categories of the Holy Qur’an that target the information needs of both curious and skeptical users. To the best \
of our effort, the answers to the questions (each represented as a sequence of verses) in AyaTEC were exhaustive—that \
is, all qur’anic verses that directly answered the questions were exhaustively extracted and annotated. To facilitate \
the use of AyaTEC in evaluating the systems designed for that task, we propose several evaluation measures to support \
the different types of questions and the nature of verse-based answers while integrating the concept of partial \
matching of answers in the evaluation.
"""

_HOMEPAGE = "https://sites.google.com/view/quran-qa-2022/home"

_LICENSE = "CC-BY-ND 4.0"

_URL = "https://gitlab.com/bigirqu/quranqa/-/raw/main/datasets/"
_URLS = {
    "train": _URL + "qrcd_v1.1_train.jsonl",
    "dev": _URL + "qrcd_v1.1_dev.jsonl",
    "test": _URL + "qrcd_v1.1_test_gold.jsonl",
    "test_noAnswers": _URL + "qrcd_v1.1_test_noAnswers.jsonl",
}
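
# Each file above is JSONL: one JSON object per line. The record shape sketched
# below is only illustrative (field values are hypothetical) and is inferred from
# the fields this script accesses; note that the raw records store the answer
# offset as "start_char", which _generate_examples() exposes as "answer_start".
#
# {
#     "pq_id": "...",
#     "passage": "...",
#     "surah": 1,
#     "verses": "...",
#     "question": "...",
#     "answers": [{"text": "...", "start_char": 0}],
# }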


class QuranQAConfig(datasets.BuilderConfig):
    """BuilderConfig for QuranQA."""

    def __init__(self, **kwargs):
        """BuilderConfig for QuranQA.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(QuranQAConfig, self).__init__(**kwargs)


class QuranQA(datasets.GeneratorBasedBuilder):
    """QuranQA: Qur'anic Reading Comprehension Dataset. Version 1.1.0"""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        QuranQAConfig(name="shared_task", version=VERSION, description="Shared task (LREC 2022)"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "pq_id": datasets.Value("string"),
                    "passage": datasets.Value("string"),
                    "surah": datasets.Value("int8"),
                    "verses": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
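
    # Illustrative shape of one encoded example (hypothetical values), after
    # _generate_examples() below has regrouped the per-answer dicts into the
    # columnar "answers" layout declared above:
    #
    # {
    #     "pq_id": "...",
    #     "passage": "...",
    #     "surah": 1,
    #     "verses": "...",
    #     "question": "...",
    #     "answers": {"text": ["..."], "answer_start": [0]},
    # }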

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
            datasets.SplitGenerator(
                name="test_noAnswers",
                gen_kwargs={"filepath": downloaded_files["test_noAnswers"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) pairs read from one QRCD JSONL file."""
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                sample = json.loads(line)
                # Regroup the per-answer dicts into the columnar layout declared in
                # _info(): the raw files store the character offset as "start_char",
                # which maps onto the "answer_start" feature. The .get() guards the
                # no-answers test file, which may omit the "answers" field.
                answers = sample.get("answers", [])
                sample["answers"] = {
                    "text": [answer["text"] for answer in answers],
                    "answer_start": [answer["start_char"] for answer in answers],
                }
                yield key, sample
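

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the loader itself. It assumes this
    # script is saved locally (e.g. as quran_qa.py) and that the installed
    # `datasets` version still supports loading a dataset from a local script
    # path; adjust if your version requires trust_remote_code=True.
    ds = datasets.load_dataset(__file__, "shared_task")
    print(ds)
    print(ds["train"][0]["pq_id"], ds["train"][0]["answers"])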