fix: pyarrow float to int64 conversion error in dataset generator

#3
Files changed (1)
  1. DocLayNet-small.py +145 -81
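Context for the fix: the DocLayNet annotation JSONs carry box and box_line coordinates as floats, while the features schema in the script below declares them as sequences of int64. When _generate_examples yields float coordinates into an int64 column, pyarrow refuses the lossy cast and the build fails. A minimal sketch of the failure mode (the coordinate values are illustrative, not taken from the dataset):

import pyarrow as pa

# A float with a fractional part cannot be stored losslessly as int64,
# so Arrow raises ArrowInvalid instead of silently truncating.
try:
    pa.array([171.95, 668.97], type=pa.int64())
except pa.ArrowInvalid as err:
    print(err)  # e.g. "Float value 171.95 was truncated converting to int64"

# Rounding first makes the cast exact, which is what this PR does:
pa.array([round(171.95), round(668.97)], type=pa.int64())  # -> [172, 669]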
DocLayNet-small.py CHANGED
@@ -25,6 +25,7 @@ DocLayNet dataset:
 
 import json
 import os
+
 # import base64
 from PIL import Image
 import datasets
@@ -56,12 +57,19 @@ _LICENSE = "https://github.com/DS4SD/DocLayNet/blob/main/LICENSE"
 # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
 # }
 
+
 # functions
 def load_image(image_path):
     image = Image.open(image_path).convert("RGB")
     w, h = image.size
     return image, (w, h)
 
+
+def convert_bbox_to_int(bbox):
+    """Convert bounding box coordinates to integers, handling float values."""
+    return [int(round(coord)) for coord in bbox]
+
+
 logger = datasets.logging.get_logger(__name__)
 
 
@@ -75,7 +83,7 @@ class DocLayNetBuilderConfig(datasets.BuilderConfig):
         """
         super().__init__(name, **kwargs)
 
-
+
 class DocLayNet(datasets.GeneratorBasedBuilder):
     """
     DocLayNet small is about 1% of the DocLayNet dataset (more information at https://huggingface.co/datasets/pierreguillou/DocLayNet-small)
@@ -100,40 +108,72 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'second_domain')
 
     DEFAULT_CONFIG_NAME = "DocLayNet_2022.08_processed_on_2023.01"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
+
     BUILDER_CONFIGS = [
-        DocLayNetBuilderConfig(name=DEFAULT_CONFIG_NAME, version=VERSION, description="DocLayNet small dataset"),
+        DocLayNetBuilderConfig(
+            name=DEFAULT_CONFIG_NAME,
+            version=VERSION,
+            description="DocLayNet small dataset",
+        ),
     ]
 
     BUILDER_CONFIG_CLASS = DocLayNetBuilderConfig
-
+
     def _info(self):
-
+
         features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "texts": datasets.Sequence(datasets.Value("string")),
-                "bboxes_block": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
-                "bboxes_line": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
-                "categories": datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=["Caption", "Footnote", "Formula", "List-item", "Page-footer", "Page-header", "Picture", "Section-header", "Table", "Text", "Title"]
-                    )
-                ),
-                "image": datasets.features.Image(),
-                # "pdf": datasets.Value("string"),
-                "page_hash": datasets.Value("string"),  # unique identifier, equal to filename
-                "original_filename": datasets.Value("string"),  # original document filename
-                "page_no": datasets.Value("int32"),  # page number in original document
-                "num_pages": datasets.Value("int32"),  # total pages in original document
-                "original_width": datasets.Value("int32"),  # width in pixels @72 ppi
-                "original_height": datasets.Value("int32"),  # height in pixels @72 ppi
-                "coco_width": datasets.Value("int32"),  # width in pixels in PNG and COCO format
-                "coco_height": datasets.Value("int32"),  # height in pixels in PNG and COCO format
-                "collection": datasets.Value("string"),  # sub-collection name
-                "doc_category": datasets.Value("string"),  # category type of the document
-            }
-        )
+            {
+                "id": datasets.Value("string"),
+                "texts": datasets.Sequence(datasets.Value("string")),
+                "bboxes_block": datasets.Sequence(
+                    datasets.Sequence(datasets.Value("int64"))
+                ),
+                "bboxes_line": datasets.Sequence(
+                    datasets.Sequence(datasets.Value("int64"))
+                ),
+                "categories": datasets.Sequence(
+                    datasets.features.ClassLabel(
+                        names=[
+                            "Caption",
+                            "Footnote",
+                            "Formula",
+                            "List-item",
+                            "Page-footer",
+                            "Page-header",
+                            "Picture",
+                            "Section-header",
+                            "Table",
+                            "Text",
+                            "Title",
+                        ]
+                    )
+                ),
+                "image": datasets.features.Image(),
+                # "pdf": datasets.Value("string"),
+                "page_hash": datasets.Value(
+                    "string"
+                ),  # unique identifier, equal to filename
+                "original_filename": datasets.Value(
+                    "string"
+                ),  # original document filename
+                "page_no": datasets.Value("int32"),  # page number in original document
+                "num_pages": datasets.Value(
+                    "int32"
+                ),  # total pages in original document
+                "original_width": datasets.Value("int32"),  # width in pixels @72 ppi
+                "original_height": datasets.Value("int32"),  # height in pixels @72 ppi
+                "coco_width": datasets.Value(
+                    "int32"
+                ),  # width in pixels in PNG and COCO format
+                "coco_height": datasets.Value(
+                    "int32"
+                ),  # height in pixels in PNG and COCO format
+                "collection": datasets.Value("string"),  # sub-collection name
+                "doc_category": datasets.Value(
+                    "string"
+                ),  # category type of the document
+            }
+        )
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
@@ -158,8 +198,10 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-
-        downloaded_file = dl_manager.download_and_extract("https://huggingface.co/datasets/pierreguillou/DocLayNet-small/resolve/main/data/dataset_small.zip")
+
+        downloaded_file = dl_manager.download_and_extract(
+            "https://huggingface.co/datasets/pierreguillou/DocLayNet-small/resolve/main/data/dataset_small.zip"
+        )
 
         return [
             datasets.SplitGenerator(
@@ -183,58 +225,80 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": os.path.join(downloaded_file, "small_dataset/test/"),
-                    "split": "test"
+                    "split": "test",
                 },
             ),
         ]
 
-
     def _generate_examples(self, filepath, split):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        ann_dir = os.path.join(filepath, "annotations")
-        img_dir = os.path.join(filepath, "images")
-        # pdf_dir = os.path.join(filepath, "pdfs")
-
-        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
-            texts = []
-            bboxes_block = []
-            bboxes_line = []
-            categories = []
-
-            # get json
-            file_path = os.path.join(ann_dir, file)
-            with open(file_path, "r", encoding="utf8") as f:
-                data = json.load(f)
-
-            # get image
-            image_path = os.path.join(img_dir, file)
-            image_path = image_path.replace("json", "png")
-            image, size = load_image(image_path)
-
-            # # get pdf
-            # pdf_path = os.path.join(pdf_dir, file)
-            # pdf_path = pdf_path.replace("json", "pdf")
-            # with open(pdf_path, "rb") as pdf_file:
-            #     pdf_bytes = pdf_file.read()
-            #     pdf_encoded_string = base64.b64encode(pdf_bytes)
-
-            for item in data["form"]:
-                text_example, category_example, bbox_block_example, bbox_line_example = item["text"], item["category"], item["box"], item["box_line"]
-                texts.append(text_example)
-                categories.append(category_example)
-                bboxes_block.append(bbox_block_example)
-                bboxes_line.append(bbox_line_example)
-
-            # get all metadata
-            page_hash = data["metadata"]["page_hash"]
-            original_filename = data["metadata"]["original_filename"]
-            page_no = data["metadata"]["page_no"]
-            num_pages = data["metadata"]["num_pages"]
-            original_width = data["metadata"]["original_width"]
-            original_height = data["metadata"]["original_height"]
-            coco_width = data["metadata"]["coco_width"]
-            coco_height = data["metadata"]["coco_height"]
-            collection = data["metadata"]["collection"]
-            doc_category = data["metadata"]["doc_category"]
-
-            yield guid, {"id": str(guid), "texts": texts, "bboxes_block": bboxes_block, "bboxes_line": bboxes_line, "categories": categories, "image": image, "page_hash": page_hash, "original_filename": original_filename, "page_no": page_no, "num_pages": num_pages, "original_width": original_width, "original_height": original_height, "coco_width": coco_width, "coco_height": coco_height, "collection": collection, "doc_category": doc_category}
+        logger.info("⏳ Generating examples from = %s", filepath)
+        ann_dir = os.path.join(filepath, "annotations")
+        img_dir = os.path.join(filepath, "images")
+        # pdf_dir = os.path.join(filepath, "pdfs")
+
+        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
+            texts = []
+            bboxes_block = []
+            bboxes_line = []
+            categories = []
+
+            # get json
+            file_path = os.path.join(ann_dir, file)
+            with open(file_path, "r", encoding="utf8") as f:
+                data = json.load(f)
+
+            # get image
+            image_path = os.path.join(img_dir, file)
+            image_path = image_path.replace("json", "png")
+            image, size = load_image(image_path)
+
+            # # get pdf
+            # pdf_path = os.path.join(pdf_dir, file)
+            # pdf_path = pdf_path.replace("json", "pdf")
+            # with open(pdf_path, "rb") as pdf_file:
+            #     pdf_bytes = pdf_file.read()
+            #     pdf_encoded_string = base64.b64encode(pdf_bytes)
+
+            for item in data["form"]:
+                (
+                    text_example,
+                    category_example,
+                    bbox_block_example,
+                    bbox_line_example,
+                ) = (item["text"], item["category"], item["box"], item["box_line"])
+                texts.append(text_example)
+                categories.append(category_example)
+                # Convert bounding boxes to integers to avoid float->int64 conversion errors
+                bboxes_block.append(convert_bbox_to_int(bbox_block_example))
+                bboxes_line.append(convert_bbox_to_int(bbox_line_example))
+
+            # get all metadata
+            page_hash = data["metadata"]["page_hash"]
+            original_filename = data["metadata"]["original_filename"]
+            page_no = data["metadata"]["page_no"]
+            num_pages = data["metadata"]["num_pages"]
+            original_width = data["metadata"]["original_width"]
+            original_height = data["metadata"]["original_height"]
+            coco_width = data["metadata"]["coco_width"]
+            coco_height = data["metadata"]["coco_height"]
+            collection = data["metadata"]["collection"]
+            doc_category = data["metadata"]["doc_category"]
+
+            yield guid, {
+                "id": str(guid),
+                "texts": texts,
+                "bboxes_block": bboxes_block,
+                "bboxes_line": bboxes_line,
+                "categories": categories,
+                "image": image,
+                "page_hash": page_hash,
+                "original_filename": original_filename,
+                "page_no": page_no,
+                "num_pages": num_pages,
+                "original_width": original_width,
+                "original_height": original_height,
+                "coco_width": coco_width,
+                "coco_height": coco_height,
+                "collection": collection,
+                "doc_category": doc_category,
+            }
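For reference, the behavior of the new helper as a standalone sketch (input values are illustrative):

def convert_bbox_to_int(bbox):
    """Convert bounding box coordinates to integers, handling float values."""
    return [int(round(coord)) for coord in bbox]

print(convert_bbox_to_int([171.95, 668.97, 335.4, 684.5]))
# -> [172, 669, 335, 684]; 684.5 rounds down because Python 3's round()
# uses round-half-to-even (banker's rounding).

For pixel bounding boxes the half-pixel bias of banker's rounding is immaterial; math.floor() or math.ceil() would be the explicit alternatives if truncation semantics ever mattered.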
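A quick end-to-end check once this is merged, assuming the "test" split name from the script above (recent datasets releases also require trust_remote_code for script-based datasets like this one):

from datasets import load_dataset

# Loading runs _generate_examples end to end; any remaining float -> int64
# casting problem would surface here as a pyarrow ArrowInvalid error.
dataset = load_dataset("pierreguillou/DocLayNet-small", trust_remote_code=True)

sample = dataset["test"][0]
# Every coordinate should now be a plain int backed by an Arrow int64 column.
assert all(isinstance(c, int) for box in sample["bboxes_block"] for c in box)
print(dataset["test"].features["bboxes_block"])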