Rachel Wicks committed
Commit cb132d3
Parent(s): fc94098

starter files

Files changed:
- .gitattributes +1 -0
- files.yml +3 -0
- paradocs.py +170 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+files.yml filter=lfs diff=lfs merge=lfs -text
files.yml ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf83d2c1ebc9d0f7c13f507e88977346ba358c142c38e04b51219f7fb9c74fbb
+size 329
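Note: because the `.gitattributes` change above routes files.yml through Git LFS, what is committed here is not the YAML itself but a Git LFS pointer file. Its three fields are the LFS spec version, the SHA-256 object ID of the real content, and that content's size in bytes (329); the actual YAML lives in LFS storage. One caveat worth flagging: for LFS-tracked files, the Hub's `/raw/` endpoint typically serves this pointer text rather than the resolved content (`/resolve/` is the endpoint that redirects to the underlying bytes), which may interact badly with the `/raw/main/files.yml` fetch in paradocs.py below.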
paradocs.py ADDED
@@ -0,0 +1,170 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file provides a HuggingFace dataset loader implementation for
the ParaDocs dataset.

ParaDocs is a multilingual machine translation dataset that has
labelled document annotations for ParaCrawl, NewsCommentary, and
Europarl data which can be used to create parallel document
datasets for training of context-aware machine translation models.
"""

# https://huggingface.co/docs/datasets/dataset_script

import csv
import json
import os
import re
import pathlib
from pathlib import Path
import yaml
from ast import literal_eval

import datasets

import gzip
try:
    import lzma as xz
except ImportError:
    import pylzma as xz

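# NOTE: gzip and lzma (with a pylzma fallback) are imported but never used
# below; like json/os/re/pathlib above, they are presumably leftovers from
# the dataset-script template or kept for reading compressed TSV shards.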

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
ParaDocs is a multilingual machine translation dataset that has
labelled document annotations for ParaCrawl, NewsCommentary, and
Europarl data which can be used to create parallel document
datasets for training of context-aware machine translation models.
"""

_HOMEPAGE = "https://huggingface.co/datasets/rewicks/paradocs"

_LICENSE = "cc-by-sa-4.0"

_URL = "https://huggingface.co/datasets/rewicks/paradocs"

# Load the file paths for all the splits (per language currently)

file_list_url = "https://huggingface.co/datasets/rewicks/paradocs/raw/main/files.yml"
import urllib.request
with urllib.request.urlopen(file_list_url) as f:
    try:
        fnames = yaml.safe_load(f)
    except yaml.YAMLError:
        print("Error loading the file paths for the dataset splits. Aborting.")
        exit(1)

_DATA_URL = fnames['fnames']

_VARIANTS = list(_DATA_URL.keys())

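# The resolved files.yml is assumed, from its use here, to hold a top-level
# `fnames` mapping from variant names to lists of shard URLs, roughly
# (hypothetical values):
#
#   fnames:
#     en-de:
#       - https://huggingface.co/datasets/rewicks/paradocs/resolve/main/<shard>
#
# so _DATA_URL maps each variant to its shard URLs and _VARIANTS holds the
# available configuration names.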

class ParaDocs(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                    "sim_score_one": datasets.Value("float32"),
                    "sim_score_two": datasets.Value("float32"),
                    "collection": datasets.Value("string"),
                    "src_paragraph_id": datasets.Value("string"),
                    "tgt_paragraph_id": datasets.Value("string"),
                    "src_sentence_id": datasets.Value("string"),
                    "tgt_sentence_id": datasets.Value("string"),
                    "src_start_id": datasets.Value("string"),
                    "src_end_id": datasets.Value("string"),
                    "tgt_start_id": datasets.Value("string"),
                    "tgt_end_id": datasets.Value("string"),
                    "src_lid_prob": datasets.Value("float32"),
                    "tgt_lid_prob": datasets.Value("float32"),
                    "src_docid": datasets.Value("string"),
                    "tgt_docid": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

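    # Each variant is exposed as a single split named after the config itself,
    # so requesting a config downloads only that variant's shards.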
    def _split_generators(self, dl_manager):
        data_sources = {self.config.name: _DATA_URL[self.config.name]}

        return [
            datasets.SplitGenerator(
                name=lang,
                gen_kwargs={
                    "filepaths": dl_manager.download(data_sources[lang])
                }
            )
            for lang in data_sources
        ]

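    # NOTE: this helper appears to be unused leftover from the dataset-script
    # template (it operates on QA-pair features, which ParaDocs does not have).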
    def _get_qa_pair_list_features(self, qa_pair, feature_name):
        res = []

        if feature_name in qa_pair:
            if qa_pair[feature_name]:
                return qa_pair[feature_name]
        else:
            if feature_name.startswith('en'):
                feature_name = '_'.join(feature_name.split('_')[1:])
                return self._get_qa_pair_list_features(qa_pair, feature_name)

        return res

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            # logger.info("Generating examples from = %s", filepath)
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    rstream = csv.DictReader(
                        f,
                        delimiter='\t',
                        fieldnames=[
                            "src",
                            "tgt",
                            "sim_score_one",
                            "sim_score_two",
                            "collection",
                            "src_paragraph_id",
                            "tgt_paragraph_id",
                            "src_sentence_id",
                            "tgt_sentence_id",
                            "src_start_id",
                            "src_end_id",
                            "tgt_start_id",
                            "tgt_end_id",
                            "src_lid_prob",
                            "tgt_lid_prob",
                            "src_docid",
                            "tgt_docid"
                        ],
                        quoting=csv.QUOTE_NONE
                    )
                    for example in rstream:
                        yield id_, example
                        id_ += 1
            # A bare `except:` here would also swallow KeyboardInterrupt and
            # GeneratorExit; catch Exception instead.
            except Exception:
                print("Error reading file:", filepath)
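For reference, a script-based loader like this is normally consumed through `datasets.load_dataset`. A minimal sketch, assuming a hypothetical variant name "en-de" (the real config names are the keys of the `fnames` mapping in files.yml) and a recent `datasets` release, which requires `trust_remote_code=True` for script datasets:

import datasets

ds = datasets.load_dataset(
    "rewicks/paradocs",
    "en-de",                 # hypothetical variant; the real names are in _VARIANTS
    streaming=True,          # stream shards instead of downloading them all up front
    trust_remote_code=True,  # script datasets need this on newer `datasets` releases
)

# _split_generators names the single split after the config itself.
for i, example in enumerate(ds["en-de"]):
    print(example["src"], "->", example["tgt"])
    if i == 2:
        break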