holylovenia committed
Commit
0d43453
1 Parent(s): 9ee6d35

Upload cc_aligned_doc.py with huggingface_hub

Files changed (1)
  1. cc_aligned_doc.py +154 -0
cc_aligned_doc.py ADDED
@@ -0,0 +1,154 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{elkishky_ccaligned_2020,
+     author = {El-Kishky, Ahmed and Chaudhary, Vishrav and Guzm{\'a}n, Francisco and Koehn, Philipp},
+     booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP 2020)},
+     month = {November},
+     title = {{CCAligned}: A Massive Collection of Cross-lingual Web-Document Pairs},
+     year = {2020},
+     address = {Online},
+     publisher = {Association for Computational Linguistics},
+     url = {https://www.aclweb.org/anthology/2020.emnlp-main.480},
+     doi = {10.18653/v1/2020.emnlp-main.480},
+     pages = {5960--5969},
+ }
+ """
+
+ _DATASETNAME = "cc_aligned_doc"
+
+ _DESCRIPTION = """\
+ CCAligned consists of parallel or comparable web-document pairs in 137 languages aligned with English \
+ (10 of these languages are from Southeast Asia; Burmese has two document collections in different scripts). \
+ The pairs were constructed by performing language identification on raw web documents and checking that \
+ matching language codes appeared in the URLs of the paired documents. This URL pattern-matching approach \
+ yielded more than 100 million documents aligned with English.
+ """
+
+ _HOMEPAGE = "https://www2.statmt.org/cc-aligned/"
+
+ _LANGUAGES = ["ind", "sun", "tha", "vie", "zlm", "lao", "khm", "mya", "ceb", "war"]
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ # CCAligned subset code -> ISO 639-3 language code.
+ _SUBSETS = {
+     "id_ID": "ind",
+     "su_ID": "sun",
+     "th_TH": "tha",
+     "vi_VN": "vie",
+     "ms_MY": "zlm",
+     "lo_LA": "lao",
+     "km_KH": "khm",
+     "my_MM": "mya",
+     "my_MM_zaw": "mya",
+     "cx_PH": "ceb",
+     "wy_PH": "war",
+ }
+
+ _URLS = {_DATASETNAME: "https://data.statmt.org/cc-aligned/en_XX-{subset}.tsv.xz"}
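+ # The {subset} placeholder is filled per config in _split_generators, e.g.
+ # subset "id_ID" resolves to https://data.statmt.org/cc-aligned/en_XX-id_ID.tsv.xz.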
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class CCAlignedDocDataset(datasets.GeneratorBasedBuilder):
+     """CCAligned comparable web-document pairs aligned with English for 10 Southeast Asian languages."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+     SEACROWD_SCHEMA_NAME = "t2t"
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         )
+         for subset in _SUBSETS.keys()
+     ] + [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset}_seacrowd_{schema_name}",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{schema_name}",
+             subset_id=f"{_DATASETNAME}",
+         )
+         # zip() supplies schema_name through the iterable because class-body names
+         # such as SEACROWD_SCHEMA_NAME are not visible inside a comprehension body
+         # at class scope.
+         for subset, schema_name in zip(_SUBSETS.keys(), len(_SUBSETS) * [SEACROWD_SCHEMA_NAME])
+     ]
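+     # The configs above yield names like "cc_aligned_doc_id_ID_source" and
+     # "cc_aligned_doc_id_ID_seacrowd_t2t", one pair per subset code in _SUBSETS.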
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_id_ID_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "Domain": datasets.Value("string"),
+                     "Source_URL": datasets.Value("string"),
+                     "Source_Content": datasets.Value("string"),
+                     "Target_URL": datasets.Value("string"),
+                     "Target_Content": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # Recover the subset code by stripping the dataset-name prefix and the
+         # schema suffix; indexing split("_")[3:5] would truncate "my_MM_zaw" to "my_MM".
+         subset = self.config.name.replace(f"{_DATASETNAME}_", "").replace("_source", "").replace(f"_seacrowd_{self.SEACROWD_SCHEMA_NAME}", "")
+         url = _URLS[_DATASETNAME].format(subset=subset)
+         data_dir = dl_manager.download_and_extract(url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         # Same subset recovery as in _split_generators.
+         subset = self.config.name.replace(f"{_DATASETNAME}_", "").replace("_source", "").replace(f"_seacrowd_{self.SEACROWD_SCHEMA_NAME}", "")
+         with open(filepath, "r", encoding="utf-8") as f:
+             lines = f.readlines()
+         if self.config.schema == "source":
+             for idx, line in enumerate(lines):
+                 # Each TSV row: Domain, Source_URL, Source_Content, Target_URL, Target_Content.
+                 content = line.rstrip("\n").split("\t")
+                 example = {
+                     "Domain": content[0],
+                     "Source_URL": content[1],
+                     "Source_Content": content[2],
+                     "Target_URL": content[3],
+                     "Target_Content": content[4],
+                 }
+                 yield idx, example
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             for idx, line in enumerate(lines):
+                 content = line.rstrip("\n").split("\t")
+                 example = {
+                     "id": str(idx),
+                     "text_1": content[2],
+                     "text_2": content[4],
+                     "text_1_name": "en",
+                     "text_2_name": _SUBSETS[subset],
+                 }
+                 yield idx, example
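
A minimal usage sketch for the script above, assuming the seacrowd package is installed and a datasets version that still supports script-based loading (recent versions require trust_remote_code=True); the config name is one of those generated by BUILDER_CONFIGS:

import datasets

# Load the Indonesian-English document pairs in the source schema.
dset = datasets.load_dataset(
    "cc_aligned_doc.py",
    name="cc_aligned_doc_id_ID_source",
    split="train",
    trust_remote_code=True,
)
print(dset[0]["Domain"], dset[0]["Source_URL"])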