Convert dataset to Parquet

#6
by lhoestq (HF Staff) - opened
.gitattributes CHANGED
@@ -14,3 +14,4 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ funsd/train-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,42 @@
+ ---
+ dataset_info:
+   config_name: funsd
+   features:
+   - name: id
+     dtype: string
+   - name: words
+     sequence: string
+   - name: bboxes
+     sequence:
+       sequence: int64
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-HEADER
+           '2': I-HEADER
+           '3': B-QUESTION
+           '4': I-QUESTION
+           '5': B-ANSWER
+           '6': I-ANSWER
+   - name: image_path
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 1191784
+     num_examples: 149
+   - name: test
+     num_bytes: 472020
+     num_examples: 50
+   download_size: 329600
+   dataset_size: 1663804
+ configs:
+ - config_name: funsd
+   data_files:
+   - split: train
+     path: funsd/train-*
+   - split: test
+     path: funsd/test-*
+   default: true
+ ---
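
With the Parquet shards and the card above in place, the dataset resolves through the Hub's standard Parquet data files instead of a loading script. A minimal loading sketch (the repo id below is a placeholder, not part of this PR):

```python
from datasets import load_dataset

# "<namespace>/funsd" is a hypothetical repo id; substitute the repository this PR targets.
ds = load_dataset("<namespace>/funsd", name="funsd")

print(ds)                        # DatasetDict with "train" (149 rows) and "test" (50 rows)
example = ds["train"][0]
print(example["words"][:5])      # word tokens
print(example["ner_tags"][:5])   # integer ids into [O, B-HEADER, I-HEADER, ...]
```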
funsd.py DELETED
@@ -1,123 +0,0 @@
- # coding=utf-8
- import json
- import os
-
- import datasets
-
- from PIL import Image
- import numpy as np
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @article{Jaume2019FUNSDAD,
-   title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents},
-   author={Guillaume Jaume and H. K. Ekenel and J. Thiran},
-   journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)},
-   year={2019},
-   volume={2},
-   pages={1-6}
- }
- """
- _DESCRIPTION = """\
- https://guillaumejaume.github.io/FUNSD/
- """
-
- def load_image(image_path):
-     image = Image.open(image_path).convert("RGB")
-     w, h = image.size
-     return image, (w, h)
-
- def normalize_bbox(bbox, size):
-     return [
-         int(1000 * bbox[0] / size[0]),
-         int(1000 * bbox[1] / size[1]),
-         int(1000 * bbox[2] / size[0]),
-         int(1000 * bbox[3] / size[1]),
-     ]
-
- class FunsdConfig(datasets.BuilderConfig):
-     """BuilderConfig for FUNSD"""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for FUNSD.
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(FunsdConfig, self).__init__(**kwargs)
-
- class Funsd(datasets.GeneratorBasedBuilder):
-     """FUNSD dataset."""
-
-     BUILDER_CONFIGS = [
-         FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "words": datasets.Sequence(datasets.Value("string")),
-                     "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
-                     "ner_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
-                         )
-                     ),
-                     "image_path": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://guillaumejaume.github.io/FUNSD/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         logger.info("⏳ Generating examples from = %s", filepath)
-         ann_dir = os.path.join(filepath, "annotations")
-         img_dir = os.path.join(filepath, "images")
-         for guid, file in enumerate(sorted(os.listdir(ann_dir))):
-             words = []
-             bboxes = []
-             ner_tags = []
-             file_path = os.path.join(ann_dir, file)
-             with open(file_path, "r", encoding="utf8") as f:
-                 data = json.load(f)
-             image_path = os.path.join(img_dir, file)
-             image_path = image_path.replace("json", "png")
-             image, size = load_image(image_path)
-             for item in data["form"]:
-                 words_example, label = item["words"], item["label"]
-                 words_example = [w for w in words_example if w["text"].strip() != ""]
-                 if len(words_example) == 0:
-                     continue
-                 if label == "other":
-                     for w in words_example:
-                         words.append(w["text"])
-                         ner_tags.append("O")
-                         bboxes.append(normalize_bbox(w["box"], size))
-                 else:
-                     words.append(words_example[0]["text"])
-                     ner_tags.append("B-" + label.upper())
-                     bboxes.append(normalize_bbox(words_example[0]["box"], size))
-                     for w in words_example[1:]:
-                         words.append(w["text"])
-                         ner_tags.append("I-" + label.upper())
-                         bboxes.append(normalize_bbox(w["box"], size))
-             yield guid, {"id": str(guid), "words": words, "bboxes": bboxes, "ner_tags": ner_tags, "image_path": image_path}
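
With the script removed, the examples it used to generate live directly in the Parquet shards, so any Parquet reader can consume them. A minimal sketch with pandas, assuming the train shard has been downloaded locally under the path used in this PR:

```python
import pandas as pd

# Local path is an assumption (e.g. obtained via huggingface_hub.hf_hub_download).
df = pd.read_parquet("funsd/train-00000-of-00001.parquet")

print(len(df))                   # 149 rows, matching the README train split
row = df.iloc[0]
print(row["id"], len(row["words"]), len(row["bboxes"]), len(row["ner_tags"]))
```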
funsd/test-00000-of-00001.parquet ADDED
Binary file (98.4 kB)
 
funsd/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46bd645c378f4055c1d17c162d08765a423171fb04af9938094363d581cbf91f
+ size 231218
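
The shard above is stored as a Git LFS pointer: the repository tracks only the oid and size, while the 231,218-byte Parquet blob lives in LFS storage. A minimal integrity check for a locally downloaded copy (the local path is an assumption):

```python
import hashlib
from pathlib import Path

# Hypothetical local path to the downloaded train shard.
path = Path("funsd/train-00000-of-00001.parquet")
data = path.read_bytes()

assert len(data) == 231218, "size does not match the LFS pointer"
digest = hashlib.sha256(data).hexdigest()
assert digest == "46bd645c378f4055c1d17c162d08765a423171fb04af9938094363d581cbf91f", "oid mismatch"
print("shard matches its LFS pointer")
```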