nielsr HF Staff lhoestq HF Staff committed on
Commit
ccd2a77
·
verified ·
1 Parent(s): 0c96f19

Convert dataset to Parquet (#8)

Browse files

- Convert dataset to Parquet (8f94e0f827d664cc275c26eedb60bcc0b7cbe96b)
- Delete loading script (fd4875faf85e8721c8b90a03aa4ad1e4446c7fe9)


Co-authored-by: Quentin Lhoest <[email protected]>

README.md ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ dataset_info:
3
+ config_name: funsd
4
+ features:
5
+ - name: id
6
+ dtype: string
7
+ - name: tokens
8
+ sequence: string
9
+ - name: bboxes
10
+ sequence:
11
+ sequence: int64
12
+ - name: ner_tags
13
+ sequence:
14
+ class_label:
15
+ names:
16
+ '0': O
17
+ '1': B-HEADER
18
+ '2': I-HEADER
19
+ '3': B-QUESTION
20
+ '4': I-QUESTION
21
+ '5': B-ANSWER
22
+ '6': I-ANSWER
23
+ - name: image
24
+ dtype: image
25
+ splits:
26
+ - name: train
27
+ num_bytes: 27288633.0
28
+ num_examples: 149
29
+ - name: test
30
+ num_bytes: 9931720.0
31
+ num_examples: 50
32
+ download_size: 35837449
33
+ dataset_size: 37220353.0
34
+ configs:
35
+ - config_name: funsd
36
+ data_files:
37
+ - split: train
38
+ path: funsd/train-*
39
+ - split: test
40
+ path: funsd/test-*
41
+ default: true
42
+ ---
funsd-layoutlmv3.py DELETED
@@ -1,143 +0,0 @@
1
- # coding=utf-8
2
- '''
3
- Reference: https://huggingface.co/datasets/nielsr/funsd/blob/main/funsd.py
4
- '''
5
- import json
6
- import os
7
-
8
- from PIL import Image
9
-
10
- import datasets
11
-
12
def load_image(image_path):
    """Open *image_path* and return ``(rgb_image, (width, height))``.

    The image is always converted to RGB so downstream feature encoding
    sees a consistent channel layout.
    """
    rgb = Image.open(image_path).convert("RGB")
    width, height = rgb.size
    return rgb, (width, height)
16
-
17
def normalize_bbox(bbox, size, scale=1000):
    """Normalize an absolute pixel box to a relative integer grid.

    Args:
        bbox: ``[x0, y0, x1, y1]`` in absolute pixel coordinates.
        size: ``(width, height)`` of the source image.
        scale: Side length of the target coordinate grid. Defaults to 1000,
            the convention used by the LayoutLM family of models, so existing
            callers are unaffected.

    Returns:
        ``[x0, y0, x1, y1]`` rescaled into ``0..scale`` and truncated to int.
    """
    width, height = size
    return [
        int(scale * bbox[0] / width),
        int(scale * bbox[1] / height),
        int(scale * bbox[2] / width),
        int(scale * bbox[3] / height),
    ]
24
-
25
# Module-level logger routed through the `datasets` logging utilities so the
# builder's messages follow the library-wide verbosity configuration.
logger = datasets.logging.get_logger(__name__)


# BibTeX citation for the FUNSD paper (Jaume et al., ICDARW 2019),
# surfaced through `datasets.DatasetInfo.citation`.
_CITATION = """\
@article{Jaume2019FUNSDAD,
title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents},
author={Guillaume Jaume and H. K. Ekenel and J. Thiran},
journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)},
year={2019},
volume={2},
pages={1-6}
}
"""

# Dataset description: the official FUNSD homepage URL,
# surfaced through `datasets.DatasetInfo.description`.
_DESCRIPTION = """\
https://guillaumejaume.github.io/FUNSD/
"""
42
-
43
-
44
class FunsdConfig(datasets.BuilderConfig):
    """Builder configuration for the FUNSD dataset.

    All keyword arguments are forwarded unchanged to
    ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """Create a FUNSD config; *kwargs* go straight to the base class."""
        super().__init__(**kwargs)
54
-
55
-
56
class Funsd(datasets.GeneratorBasedBuilder):
    """FUNSD dataset builder: word tokens, line-level boxes, BIO NER tags, and page images."""

    BUILDER_CONFIGS = [
        FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
    ]

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # One [x0, y0, x1, y1] box per token (normalized to 0..1000
                    # by normalize_bbox in _generate_examples).
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    # BIO tags over the three FUNSD entity types.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            homepage="https://guillaumejaume.github.io/FUNSD/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the official FUNSD archive and map its folders to splits."""
        downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
            ),
        ]

    def get_line_bbox(self, bboxs):
        """Collapse the word boxes of one text line into a shared line box.

        Computes the bounding rectangle of all boxes in *bboxs* and returns it
        repeated ``len(bboxs)`` times, so every word of the line carries the
        same segment-level box (the LayoutLMv3 convention).
        """
        # Even indices of each box are x coordinates, odd indices are y.
        x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(x), min(y), max(x), max(y)

        assert x1 >= x0 and y1 >= y0
        bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))]
        return bbox

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one split directory.

        Expects *filepath* to contain ``annotations/*.json`` and matching
        ``images/*.png`` files, as laid out in the official FUNSD archive.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        # Sort filenames so example ordering is deterministic across runs.
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # The image shares the annotation's basename with a .png extension.
            image_path = os.path.join(img_dir, file)
            image_path = image_path.replace("json", "png")
            image, size = load_image(image_path)
            for item in data["form"]:
                cur_line_bboxes = []
                words, label = item["words"], item["label"]
                # Drop whitespace-only words; skip form entries left empty.
                words = [w for w in words if w["text"].strip() != ""]
                if len(words) == 0:
                    continue
                if label == "other":
                    # "other" text lies outside any entity: tag every word O.
                    for w in words:
                        tokens.append(w["text"])
                        ner_tags.append("O")
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                else:
                    # BIO scheme: first word gets B-<LABEL>, the rest I-<LABEL>.
                    tokens.append(words[0]["text"])
                    ner_tags.append("B-" + label.upper())
                    cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
                    for w in words[1:]:
                        tokens.append(w["text"])
                        ner_tags.append("I-" + label.upper())
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                # Replace per-word boxes with the shared line-level box.
                cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
                bboxes.extend(cur_line_bboxes)
            yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
                         "image": image}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
funsd/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:190b6c2d4fcf58e700f7952eb2ddf1cbf7399b681de714287077000f8f2eabc7
3
+ size 9537484
funsd/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99dc36dc4e46bc76a3abd50e593f14f75903ca9c50722e2a835d43b65ec999c8
3
+ size 26299965