SaylorTwift (HF Staff) committed
Commit a78d1e1 · verified · 1 parent: bcfb5b2

Delete loading script

Files changed (1)
  1. IndicQA.py +0 -113
IndicQA.py DELETED
@@ -1,113 +0,0 @@
-"""TODO(xquad): Add a description here."""
-
-
-import json
-
-import datasets
-
-
-_CITATION = """\
-
-"""
-
-_DESCRIPTION = """\
-
-"""
-
-_URL = "https://huggingface.co/datasets/ai4bharat/IndicQA/resolve/main/data/"
-_LANG = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
-
-
-class IndicqaConfig(datasets.BuilderConfig):
-
-    """BuilderConfig for Indicqa"""
-
-    def __init__(self, lang, **kwargs):
-        """
-
-        Args:
-          lang: string, language for the input text
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(IndicqaConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-        self.lang = lang
-
-
-class Xquad(datasets.GeneratorBasedBuilder):
-    """TODO(indicqa): Short description of my dataset."""
-
-    # TODO(indicqa): Set up version.
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [IndicqaConfig(name=f"indicqa.{lang}", description=_DESCRIPTION, lang=lang) for lang in _LANG]
-
-    def _info(self):
-        # TODO(indicqa): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                        }
-                    ),
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(indicqa): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        urls_to_download = {lang: _URL + f"indicqa.{lang}.json" for lang in _LANG}
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": downloaded_files[self.config.lang]},
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(indicqa): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            indicqa = json.load(f)
-            id_ = 0
-            for article in indicqa["data"]:
-                for paragraph in article["paragraphs"]:
-                    context = paragraph["context"].strip()
-                    for qa in paragraph["qas"]:
-                        question = qa["question"].strip()
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"].strip() for answer in qa["answers"]]
-
-                        # Features currently used are "context", "question", and "answers".
-                        # Others are extracted here for the ease of future expansions.
-                        yield id_, {
-                            "context": context,
-                            "question": question,
-                            "id": qa["id"],
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
-                        id_ += 1
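
For reference, the deleted script only downloaded the per-language SQuAD-style JSON files from the repository's data/ folder and flattened them into (id, context, question, answers) rows for a single test split. The sketch below shows how the same data can still be read without the script; it assumes the data/indicqa.<lang>.json files remain in the repo and uses huggingface_hub's hf_hub_download together with the same flattening logic as _generate_examples. The commented load_dataset call at the end is an assumption about the config names exposed after this change and should be checked against the dataset page.

# Minimal sketch: reading IndicQA without the deleted loading script.
# Assumes data/indicqa.<lang>.json is still hosted in the dataset repo.
import json

from huggingface_hub import hf_hub_download


def load_indicqa(lang="hi"):
    """Download one language file and flatten it the way _generate_examples did."""
    path = hf_hub_download(
        repo_id="ai4bharat/IndicQA",
        repo_type="dataset",
        filename=f"data/indicqa.{lang}.json",
    )
    with open(path, encoding="utf-8") as f:
        raw = json.load(f)

    rows = []
    for article in raw["data"]:
        for paragraph in article["paragraphs"]:
            context = paragraph["context"].strip()
            for qa in paragraph["qas"]:
                rows.append(
                    {
                        "id": qa["id"],
                        "context": context,
                        "question": qa["question"].strip(),
                        "answers": {
                            "text": [a["text"].strip() for a in qa["answers"]],
                            "answer_start": [a["answer_start"] for a in qa["answers"]],
                        },
                    }
                )
    return rows


# Alternative path (config name is an assumption; verify on the dataset page):
# from datasets import load_dataset
# ds = load_dataset("ai4bharat/IndicQA", "indicqa.hi", split="test")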