Thinh Huynh Nguyen Truong committed
Commit · db0d271
1 Parent(s): ee7b758
Refactor data
Browse files
- data/{train.zip → set1/dev.zip} +0 -0
- data/set1/train.zip +3 -0
- data/set2/dev.zip +3 -0
- data/set2/train.zip +3 -0
- test.py +24 -36
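
For orientation, the data layout implied by the file list above is sketched below; only paths touched by this commit are shown.

data/
├── set1/
│   ├── train.zip
│   └── dev.zip
└── set2/
    ├── train.zip
    └── dev.zip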
data/{train.zip → set1/dev.zip} RENAMED
File without changes
data/set1/train.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bc049f7336b92c842c080007d5a6da9625d74807bf318d4d8c80e1b950bc512
+size 1459611
data/set2/dev.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bc049f7336b92c842c080007d5a6da9625d74807bf318d4d8c80e1b950bc512
+size 1459611
data/set2/train.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bc049f7336b92c842c080007d5a6da9625d74807bf318d4d8c80e1b950bc512
+size 1459611
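
The three ADDED entries above are Git LFS pointer files rather than the zip archives themselves: each records only the LFS spec version, the SHA-256 object id, and the payload size, while the actual bytes live in LFS storage. A minimal sketch of resolving one of these archives to a local path with huggingface_hub is shown below; the repository id is a placeholder, since it is not visible in this commit.

# Minimal sketch (placeholder repo id): fetch an LFS-backed archive from the dataset repo.
from huggingface_hub import hf_hub_download

local_zip = hf_hub_download(
    repo_id="<user>/<dataset-repo>",   # placeholder, not shown in this commit
    filename="data/set1/train.zip",
    repo_type="dataset",
)
print(local_zip)  # cached local path of the downloaded archive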
test.py CHANGED
@@ -20,7 +20,7 @@ import json
 import os
 
 import datasets
-
+from datasets import DownloadManager
 
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -49,13 +49,15 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+    # "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+    # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
 }
 
+_BASE_URL = ""
+
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
+class Test(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
     VERSION = datasets.Version("1.1.0")
@@ -73,46 +75,32 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-            name="first_domain",
+            name="set_1",
             version=VERSION,
-            description="This part of my dataset covers a first domain",
-        ),
-        datasets.BuilderConfig(
-            name="second_domain",
-            version=VERSION,
-            description="This part of my dataset covers a second domain",
+            description="RGB-D SOD Set 1",
         ),
+        # datasets.BuilderConfig(
+        #     name="set_2",
+        #     version=VERSION,
+        #     description="RGB-D SOD Set 2",
+        # ),
     ]
 
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "set_1"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if (
-            self.config.name == "first_domain"
-        ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
             # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
+            features=datasets.Features(
+                {
+                    "depth": datasets.Image(),
+                    "rgb": datasets.Image(),
+                    "gt": datasets.Image(),
+                    # These are the features of your dataset like images, labels ...
+                }
+            ),  # Here we define them above because they are different between the two configurations
             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
             # supervised_keys=("sentence", "label"),
@@ -124,7 +112,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
+    def _split_generators(self, dl_manager: DownloadManager):
         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
 
@@ -161,7 +149,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath: str, split: str):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         with open(filepath, encoding="utf-8") as f:
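
The diff above types the `_split_generators` and `_generate_examples` signatures but leaves their bodies as template TODOs, and `_BASE_URL` is still empty. The sketch below shows one way the `Test` builder could be completed for the new data/set1/{train,dev}.zip layout. It is an assumption-heavy sketch, not the committed code: the archive-internal folder names (rgb/, depth/, gt/), the mapping from config name to folder name, and the use of repo-relative paths with `dl_manager` are all guesses; only the feature names, the "set_1" config, and the archive paths come from this commit.

# A minimal sketch, not the committed implementation (see assumptions above).
import os

import datasets
from datasets import DownloadManager


class Test(datasets.GeneratorBasedBuilder):
    """RGB-D SOD data packaged as set1/set2 archives (sketch)."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="set_1", version=VERSION, description="RGB-D SOD Set 1"),
    ]

    DEFAULT_CONFIG_NAME = "set_1"

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "depth": datasets.Image(),
                    "rgb": datasets.Image(),
                    "gt": datasets.Image(),
                }
            ),
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # "set_1" -> "set1": assumed mapping from config name to data/ subfolder.
        subdir = self.config.name.replace("_", "")
        archives = dl_manager.download_and_extract(
            {
                "train": f"data/{subdir}/train.zip",
                "dev": f"data/{subdir}/dev.zip",
            }
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": archives["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": archives["dev"], "split": "dev"},
            ),
        ]

    def _generate_examples(self, filepath: str, split: str):
        # One example per RGB frame; depth and ground-truth masks are assumed to
        # share the file name with the RGB image in sibling folders.
        rgb_dir = os.path.join(filepath, "rgb")
        for key, fname in enumerate(sorted(os.listdir(rgb_dir))):
            yield key, {
                "rgb": os.path.join(rgb_dir, fname),
                "depth": os.path.join(filepath, "depth", fname),
                "gt": os.path.join(filepath, "gt", fname),
            }

If the script ships in the dataset repository, loading the first configuration would presumably look like `datasets.load_dataset("<repo_id>", "set_1")`; the repository id is again not shown in this commit.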