Convert dataset to Parquet
#4
by albertvillanova (HF staff) - opened
- README.md +14 -5
- data/test-00000-of-00001.parquet +3 -0
- data/train-00000-of-00001.parquet +3 -0
- data/validation-00000-of-00001.parquet +3 -0
- dataset_infos.json +0 -1
- medmcqa.py +0 -116
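
Once this conversion is merged, the dataset can be loaded straight from the Parquet files, with no loading script executed on the user's machine. A minimal sketch, assuming the hypothetical repo id "medmcqa":

from datasets import load_dataset

# Loads the Parquet shards directly; no dataset script is run.
ds = load_dataset("medmcqa")
print({split: d.num_rows for split, d in ds.items()})  # expected: train 182822, test 6150, validation 4183
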
README.md
CHANGED
@@ -53,16 +53,25 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 131903297
     num_examples: 182822
   - name: test
-    num_bytes:
+    num_bytes: 1399350
     num_examples: 6150
   - name: validation
-    num_bytes:
+    num_bytes: 2221428
     num_examples: 4183
-  download_size:
-  dataset_size:
+  download_size: 88311487
+  dataset_size: 135524075
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for MedMCQA
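
The new configs block above maps each split to a glob over the Parquet shards. A minimal sketch of what that mapping resolves to, assuming the three shards have been fetched into a local data/ directory:

from datasets import load_dataset

# Equivalent of the README's data_files mapping, using the generic Parquet builder.
ds = load_dataset(
    "parquet",
    data_files={
        "train": "data/train-*.parquet",
        "test": "data/test-*.parquet",
        "validation": "data/validation-*.parquet",
    },
)
print(ds)
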
data/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d308841f0c82df363d3d638b33b69b8abd266b1e615ae5d2607dbb24c70beb1
+size 936358
data/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b119434ba551517a6ec0ba1f7e0b4c029165ed284a4704f262ce37c791c493c5
+size 85899025
data/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b768a1ea34afc9f80d3106d9b21f80fa8a00ec450a1f6cd641af72ca9e591021
+size 1476104
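
The three files above are Git LFS pointers; the actual Parquet bytes are fetched with git lfs pull (or transparently by the datasets library). A minimal check that a downloaded shard matches the split sizes declared in the README, assuming the local path shown below:

import pyarrow.parquet as pq

# Inspect the shard's footer metadata without reading the full table.
pf = pq.ParquetFile("data/validation-00000-of-00001.parquet")
print(pf.metadata.num_rows)   # expected: 4183
print(pf.schema_arrow.names)  # id, question, opa, opb, opc, opd, cop, choice_type, exp, subject_name, topic_name
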
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-{"default": {"description": "MedMCQA is a large-scale, Multiple-Choice Question Answering (MCQA) dataset designed to address real-world medical entrance exam questions. \nMedMCQA has more than 194k high-quality AIIMS & NEET PG entrance exam MCQs covering 2.4k healthcare topics and 21 medical subjects are collected with an average token length of 12.77 and high topical diversity.\nThe dataset contains questions about the following topics: Anesthesia, Anatomy, Biochemistry, Dental, ENT, Forensic Medicine (FM)\nObstetrics and Gynecology (O&G), Medicine, Microbiology, Ophthalmology, Orthopedics Pathology, Pediatrics, Pharmacology, Physiology, \nPsychiatry, Radiology Skin, Preventive & Social Medicine (PSM) and Surgery\n", "citation": "CHILL'2022", "homepage": "https://medmcqa.github.io", "license": "Apache License 2.0", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "opa": {"dtype": "string", "id": null, "_type": "Value"}, "opb": {"dtype": "string", "id": null, "_type": "Value"}, "opc": {"dtype": "string", "id": null, "_type": "Value"}, "opd": {"dtype": "string", "id": null, "_type": "Value"}, "cop": {"num_classes": 4, "names": ["a", "b", "c", "d"], "id": null, "_type": "ClassLabel"}, "choice_type": {"dtype": "string", "id": null, "_type": "Value"}, "exp": {"dtype": "string", "id": null, "_type": "Value"}, "subject_name": {"dtype": "string", "id": null, "_type": "Value"}, "topic_name": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "med_mcqa", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 131904057, "num_examples": 182822, "dataset_name": "med_mcqa"}, "test": {"name": "test", "num_bytes": 1447829, "num_examples": 6150, "dataset_name": "med_mcqa"}, "validation": {"name": "validation", "num_bytes": 2221468, "num_examples": 4183, "dataset_name": "med_mcqa"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=15VkJdq5eyWIkfb_aoD3oS8i4tScbHYky": {"num_bytes": 55285460, "checksum": "16c1fbc6f47d548d2af7837b18e893aa45f45c0be9bda0a9adfff3c625bf9262"}}, "download_size": 55285460, "post_processing_size": null, "dataset_size": 135573354, "size_in_bytes": 190858814}}
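
With dataset_infos.json removed, the split metadata now lives in the README YAML and can still be inspected without downloading any data. A minimal sketch, again assuming the hypothetical repo id "medmcqa":

from datasets import load_dataset_builder

# Reads num_examples / num_bytes from the dataset card's YAML metadata.
builder = load_dataset_builder("medmcqa")
for name, split in builder.info.splits.items():
    print(name, split.num_examples, split.num_bytes)
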
medmcqa.py
DELETED
@@ -1,116 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""MedMCQA : A Large-scale Multi-Subject Multi-Choice Dataset for Medical domain Question Answering"""
-
-
-import json
-import os
-
-import datasets
-
-
-_DESCRIPTION = """\
-MedMCQA is a large-scale, Multiple-Choice Question Answering (MCQA) dataset designed to address real-world medical entrance exam questions.
-MedMCQA has more than 194k high-quality AIIMS & NEET PG entrance exam MCQs covering 2.4k healthcare topics and 21 medical subjects are collected with an average token length of 12.77 and high topical diversity.
-The dataset contains questions about the following topics: Anesthesia, Anatomy, Biochemistry, Dental, ENT, Forensic Medicine (FM)
-Obstetrics and Gynecology (O&G), Medicine, Microbiology, Ophthalmology, Orthopedics Pathology, Pediatrics, Pharmacology, Physiology,
-Psychiatry, Radiology Skin, Preventive & Social Medicine (PSM) and Surgery
-"""
-
-
-_HOMEPAGE = "https://medmcqa.github.io"
-
-_LICENSE = "Apache License 2.0"
-_URL = "https://drive.google.com/uc?export=download&id=15VkJdq5eyWIkfb_aoD3oS8i4tScbHYky"
-_CITATION = """\
-@InProceedings{pmlr-v174-pal22a,
-  title = {MedMCQA: A Large-scale Multi-Subject Multi-Choice Dataset for Medical domain Question Answering},
-  author = {Pal, Ankit and Umapathi, Logesh Kumar and Sankarasubbu, Malaikannan},
-  booktitle = {Proceedings of the Conference on Health, Inference, and Learning},
-  pages = {248--260},
-  year = {2022},
-  editor = {Flores, Gerardo and Chen, George H and Pollard, Tom and Ho, Joyce C and Naumann, Tristan},
-  volume = {174},
-  series = {Proceedings of Machine Learning Research},
-  month = {07--08 Apr},
-  publisher = {PMLR},
-  pdf = {https://proceedings.mlr.press/v174/pal22a/pal22a.pdf},
-  url = {https://proceedings.mlr.press/v174/pal22a.html},
-  abstract = {This paper introduces MedMCQA, a new large-scale, Multiple-Choice Question Answering (MCQA) dataset designed to address real-world medical entrance exam questions. More than 194k high-quality AIIMS & NEET PG entrance exam MCQs covering 2.4k healthcare topics and 21 medical subjects are collected with an average token length of 12.77 and high topical diversity. Each sample contains a question, correct answer(s), and other options which requires a deeper language understanding as it tests the 10+ reasoning abilities of a model across a wide range of medical subjects & topics. A detailed explanation of the solution, along with the above information, is provided in this study.}
-}
-"""
-
-
-class MedMCQA(datasets.GeneratorBasedBuilder):
-    """MedMCQA : A Large-scale Multi-Subject Multi-Choice Dataset for Medical domain Question Answering"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "question": datasets.Value("string"),
-                "opa": datasets.Value("string"),
-                "opb": datasets.Value("string"),
-                "opc": datasets.Value("string"),
-                "opd": datasets.Value("string"),
-                "cop": datasets.features.ClassLabel(names=["a", "b", "c", "d"]),
-                "choice_type": datasets.Value("string"),
-                "exp": datasets.Value("string"),
-                "subject_name": datasets.Value("string"),
-                "topic_name": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.json"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.json"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.json"),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                data["cop"] = int(data.get("cop", 0)) - 1
-                data["exp"] = data.get("exp", "")
-                yield key, data
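
For reference, the deleted script's JSON-lines parsing (including shifting the 1-based cop answer index to a 0-based ClassLabel) can be reproduced offline when regenerating the Parquet shards. A rough sketch under the assumption that train.json / test.json / dev.json are available locally; this is not necessarily the exact conversion used for this PR:

import json

import pandas as pd

def jsonl_to_parquet(src, dst):
    rows = []
    with open(src, encoding="utf-8") as f:
        for line in f:
            data = json.loads(line)
            # Same normalisation as the deleted builder: shift cop to 0-3, default exp to "".
            data["cop"] = int(data.get("cop", 0)) - 1
            data["exp"] = data.get("exp", "")
            rows.append(data)
    pd.DataFrame(rows).to_parquet(dst, index=False)  # requires pyarrow

jsonl_to_parquet("train.json", "data/train-00000-of-00001.parquet")
jsonl_to_parquet("test.json", "data/test-00000-of-00001.parquet")
jsonl_to_parquet("dev.json", "data/validation-00000-of-00001.parquet")
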