Upload aphantasia_drawing_dataset.py

aphantasia_drawing_dataset.py  CHANGED  (+65 -62)
@@ -1,13 +1,13 @@
 # -*- coding: utf-8 -*-
 """aphantasia_drawing_dataset.ipynb
 
-Automatically generated by Colaboratory.
+Automatically generated by Colab.
 
 Original file is located at
-    https://colab.research.google.com/drive/
+    https://colab.research.google.com/drive/1DYVroeFqoNK7DDiw_3OIczPeqEME-rbh
 """
 
-
+!pip install -q datasets
 
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
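Note that `!pip install -q datasets` is IPython shell magic carried over from the notebook export; in a plain `.py` dataset script it is a SyntaxError. Normally the dependency is simply assumed to be installed, but if an install-at-import really is wanted, a plain-Python equivalent (illustrative only, not part of this commit) would be:

import subprocess
import sys

# Python equivalent of the notebook's `!pip install -q datasets` line;
# a dataset script would normally rely on `datasets` already being installed.
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "datasets"])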
@@ -26,9 +26,10 @@ Original file is located at
 """TODO: Add a description here."""
 
 
-
+
 import base64
 from PIL import Image
+import pandas as pd
 import numpy as np
 import io
 import json
@@ -49,7 +50,6 @@ month={Sep}
 }
 """
 
-
 _DESCRIPTION = """\
 This dataset comes from the Brain Bridge Lab from the University of Chicago.
 It is from an online memory drawing experiment with 61 individuals with aphantasia
@@ -65,10 +65,7 @@ updated September 27, 2023.
 _HOMEPAGE = "https://osf.io/cahyd/"
 
 
-
-
-
-url = "https://drive.google.com/file/d/1aRhQlKPDk29yYPkx2kPhqaMwec5QZ4JE/view?usp=sharing"
+url = "https://drive.google.com/file/d/1v1oaZog5j5dD_vIElOEWLCZUrXvJ3jzx/view?usp=drive_link"
 
 def _get_drive_url(url):
     base_url = 'https://drive.google.com/uc?id='
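The commit leaves `_get_drive_url` itself untouched, and the diff shows only the first line of its body. For orientation, a minimal sketch of what such a helper presumably does; the ID-extraction logic below is an assumption, not shown in this commit:

def _get_drive_url(url):
    # Turn a share link of the form .../file/d/<FILE_ID>/view?... into a
    # direct-download URL. Only the base_url line appears in the diff;
    # the rest is a plausible reconstruction.
    base_url = 'https://drive.google.com/uc?id='
    file_id = url.split("/file/d/")[1].split("/")[0]  # assumed extraction logic
    return base_url + file_id

With the new `url` above, this sketch would yield `https://drive.google.com/uc?id=1v1oaZog5j5dD_vIElOEWLCZUrXvJ3jzx`.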
@@ -78,6 +75,7 @@ def _get_drive_url(url):
 _URL = {"train": _get_drive_url(url)}
 
 
+
 class AphantasiaDrawingDataset(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
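One piece of context worth recalling for the hunk below: `datasets.GeneratorBasedBuilder` expects `_generate_examples` to yield `(key, example)` pairs, with a key that is unique across the split. A minimal illustration of that shape (the row-reading helper is hypothetical):

# Illustrative only: the generator shape the builder expects.
def _generate_examples(self, filepath):
    rows = read_rows_somehow(filepath)  # hypothetical helper
    for idx, example in enumerate(rows):
        yield idx, example  # unique key first, then the example dict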
@@ -141,67 +139,72 @@ class AphantasiaDrawingDataset(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath):
         """This function returns the examples in the raw (text) form."""
         logging.info("generating examples from = %s", filepath)
+        def byt_to_image(image_bytes):
+            if image_bytes is not None:
+                image_buffer = io.BytesIO(image_bytes)
+                image = Image.open(image_buffer)
+                return image
+            return None
 
         with open(filepath, "r") as subjects_file:
-            subjects_data = json.load(subjects_file)
-            idx = 0
-            for sub in subjects_data.keys():
-                for room in subjects_data[sub]["drawings"].keys():
-                    if subjects_data[sub]["drawings"][room]["perception"] != "":
-                        img_byt = base64.b64decode(subjects_data[sub]["drawings"][room]["perception"])
-                        img = Image.open(io.BytesIO(img_byt))
-                        subjects_data[sub]["drawings"][room]["perception"] = img
-                    else:
-                        subjects_data[sub]["drawings"][room]["perception"] = None
-
-                    if subjects_data[sub]["drawings"][room]["memory"] != "":
-                        img_byt = base64.b64decode(subjects_data[sub]["drawings"][room]["memory"])
-                        img = Image.open(io.BytesIO(img_byt))
-                        subjects_data[sub]["drawings"][room]["memory"] = img
-                    else:
-                        subjects_data[sub]["drawings"][room]["memory"] = None
-
-                for room in subjects_data[sub]["image"].keys():
-                    img_byt = base64.b64decode(subjects_data[sub]["image"][room])
-                    img = Image.open(io.BytesIO(img_byt))
-                    subjects_data[sub]["image"][room] = img.resize((500,500))
-                idx += 1
-                age = int(subjects_data[sub]["demographics"]["age"]) if subjects_data[sub]["demographics"]["age"] else np.nan
-                yield idx, {
-                    "subject_id": sub,
-                    "treatment": subjects_data[sub]["treatment"],
+            subjects_data = pd.read_parquet(filepath)  # parquet is binary: read from the path, not the text-mode handle
+            #idx = 0
+            for idx, sub_row in subjects_data.iterrows():  # iterrows() yields (index, row) pairs
+                # for room in subjects_data[sub]["drawings"].keys():
+                #     if subjects_data[sub]["drawings"][room]["perception"] != "":
+                #         img_byt = base64.b64decode(subjects_data[sub]["drawings"][room]["perception"])
+                #         img = Image.open(io.BytesIO(img_byt))
+                #         subjects_data[sub]["drawings"][room]["perception"] = img
+                #     else:
+                #         subjects_data[sub]["drawings"][room]["perception"] = None
+
+                #     if subjects_data[sub]["drawings"][room]["memory"] != "":
+                #         img_byt = base64.b64decode(subjects_data[sub]["drawings"][room]["memory"])
+                #         img = Image.open(io.BytesIO(img_byt))
+                #         subjects_data[sub]["drawings"][room]["memory"] = img
+                #     else:
+                #         subjects_data[sub]["drawings"][room]["memory"] = None
+
+                # for room in subjects_data[sub]["image"].keys():
+                #     img_byt = base64.b64decode(subjects_data[sub]["image"][room])
+                #     img = Image.open(io.BytesIO(img_byt))
+                #     subjects_data[sub]["image"][room] = img.resize((500,500))
+                # idx += 1
+                age = int(sub_row["demographics.age"]) if sub_row["demographics.age"] else np.nan
+                yield idx, {  # _generate_examples must yield (key, example) pairs
+                    "subject_id": sub_row["subject_id"],
+                    "treatment": sub_row["treatment"],
                     "demographics": {
-                        "country": subjects_data[sub]["demographics"]["country"],
-                        "age": age,
-                        "gender": subjects_data[sub]["demographics"]["gender"],
-                        "occupation": subjects_data[sub]["demographics"]["occupation"],
-                        "art_ability": subjects_data[sub]["demographics"]["art_ability"],
-                        "art_experience": subjects_data[sub]["demographics"]["art_experience"],
-                        "device": subjects_data[sub]["demographics"]["device"],
-                        "input": subjects_data[sub]["demographics"]["input"],
-                        "difficult": subjects_data[sub]["demographics"]["difficult"],
-                        "diff_explanation": subjects_data[sub]["demographics"]["diff_explanation"],
-                        "vviq_score": subjects_data[sub]["demographics"]["vviq_score"],
-                        "osiq_score": subjects_data[sub]["demographics"]["osiq_score"]
-                    },
+                        "country": sub_row["demographics.country"],
+                        "age": age, #sub_row["demographics.age"],
+                        "gender": sub_row["demographics.gender"],
+                        "occupation": sub_row["demographics.occupation"],
+                        "art_ability": sub_row["demographics.art_ability"],
+                        "art_experience": sub_row["demographics.art_experience"],
+                        "device": sub_row["demographics.device"],
+                        "input": sub_row["demographics.input"],
+                        "difficult": sub_row["demographics.difficult"],
+                        "diff_explanation": sub_row["demographics.diff_explanation"],
+                        "vviq_score": sub_row["demographics.vviq_score"],
+                        "osiq_score": sub_row["demographics.osiq_score"]
+                    },
                     "drawings": {
                         "kitchen": {
-                            "perception": subjects_data[sub]["drawings"]["kitchen"]["perception"],
-                            "memory": subjects_data[sub]["drawings"]["kitchen"]["memory"]
+                            "perception": byt_to_image(sub_row["drawings.kitchen.perception"]),
+                            "memory": byt_to_image(sub_row["drawings.kitchen.memory"])
                         },
                         "livingroom": {
-                            "perception": subjects_data[sub]["drawings"]["livingroom"]["perception"],
-                            "memory": subjects_data[sub]["drawings"]["livingroom"]["memory"]
+                            "perception": byt_to_image(sub_row["drawings.livingroom.perception"]),
+                            "memory": byt_to_image(sub_row["drawings.livingroom.memory"])
                         },
                         "bedroom": {
-                            "perception": subjects_data[sub]["drawings"]["bedroom"]["perception"],
-                            "memory": subjects_data[sub]["drawings"]["bedroom"]["memory"]
+                            "perception": byt_to_image(sub_row["drawings.bedroom.perception"]),
+                            "memory": byt_to_image(sub_row["drawings.bedroom.memory"])
                         }
-                    },
+                    },
                     "image": {
-                        "kitchen": subjects_data[sub]["image"]["kitchen"],
-                        "livingroom": subjects_data[sub]["image"]["livingroom"],
-                        "bedroom": subjects_data[sub]["image"]["bedroom"]
-                    }
-                }
-
+                        "kitchen": byt_to_image(sub_row["image.kitchen"]),
+                        "livingroom": byt_to_image(sub_row["image.livingroom"]),
+                        "bedroom": byt_to_image(sub_row["image.bedroom"])
+                    }
+                }
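As a quick local sanity check of the new parquet-backed path, something like the following works; the local file name is hypothetical, while the flattened, dot-separated column names are taken from the diff above:

import io

import pandas as pd
from PIL import Image

# Hypothetical local copy of the train-split parquet file.
df = pd.read_parquet("aphantasia_train.parquet")
row = df.iloc[0]

# Columns are flattened, e.g. "drawings.kitchen.memory" holds raw image
# bytes (or None when the subject made no drawing).
raw = row["drawings.kitchen.memory"]
if raw is not None:
    img = Image.open(io.BytesIO(raw))
    print(row["subject_id"], img.size)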