Thinh Huynh Nguyen Truong committed
Commit 6651025 · 1 Parent(s): dd95e30
Files changed (5):
  1. .gitignore +3 -0
  2. HOW_TO_RUN.md +7 -0
  3. generate_metadata.py +57 -0
  4. requirements.txt +3 -0
  5. rgbdsod_datasets.py +180 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
+ env
+ load_data
+ __pycache__
HOW_TO_RUN.md ADDED
@@ -0,0 +1,7 @@
+ # How to run
+
+ ## Test
+
+ ~~~bash
+ datasets-cli test test.py --save_info --all_configs
+ ~~~
generate_metadata.py ADDED
@@ -0,0 +1,57 @@
+ import os
+ import glob
+ import json
+ from typing import Dict, List
+ from tqdm import tqdm
+
+ """
+ Generate metadata for this dataset in the following format:
+ {
+     "metadata": [
+         {
+             "depth": "depths/COME_Train_1.png",
+             "rgb": "RGB/COME_Train_1.jpg",
+             "gt": "GT/COME_Train_1.png"
+         },
+         ...
+     ]
+ }
+ """
+
+ depths = glob.glob("depths/*")
+ gts = glob.glob("GT/*")
+ rgbs = glob.glob("RGB/*")
+
+ metadata: List[Dict[str, str]] = []
+
+ for depth in tqdm(depths):
+     name = os.path.basename(depth).split(".")[0]
+
+     gts = glob.glob(f"GT/{name}.*")
+     if len(gts) != 1:
+         raise Exception(f"Inconsistent corresponding GT of name {name}, gts = {gts}")
+     gt = gts[0]
+
+     rgbs = glob.glob(f"RGB/{name}.*")
+     if len(rgbs) != 1:
+         raise Exception(f"Inconsistent corresponding RGB of name {name}, rgbs = {rgbs}")
+     rgb = rgbs[0]
+
+     metadata.append(
+         {
+             "depth": depth,
+             "gt": gt,
+             "rgb": rgb,
+             "name": name,
+         }
+     )
+
+ # Serializing json
+ json_object = json.dumps(dict(metadata=metadata), indent=4)
+
+ # Writing to metadata.json
+ with open("metadata.json", "w") as outfile:
+     outfile.write(json_object)
+
+ os.system('zip -r train.zip depths GT RGB metadata.json generate_metadata.py')
+
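Since the loader in `rgbdsod_datasets.py` later reads the `metadata` key from this file, it can be handy to verify the generated `metadata.json` before the zip step. A small sketch that relies only on the keys `generate_metadata.py` itself writes:

~~~python
# Sketch: check that every entry in the freshly generated metadata.json
# points at files that actually exist on disk before zipping.
import json
import os

with open("metadata.json") as f:
    metadata = json.load(f)["metadata"]  # key written by generate_metadata.py

for row in metadata:
    for field in ("depth", "rgb", "gt"):
        assert os.path.isfile(row[field]), f"missing {field} for {row['name']}"

print(f"{len(metadata)} entries verified")
~~~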
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ # Python 3.10.6
+ datasets
+ Pillow
rgbdsod_datasets.py ADDED
@@ -0,0 +1,180 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+
+ import csv
+ import json
+ import os
+ from typing import Dict, List
+ from PIL import Image
+
+ import datasets
+ from datasets import DownloadManager
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     # "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ }
+
+ _BASE_URL = ""
+
+
+ def get_download_url(config_name: str, partition: str) -> str:
+     """Get download URL based on config name and partition (train/dev/test)
+
+     Args:
+         config_name (str): can be "v1", "v2",...
+         partition (str): can be "train", "dev" or "test"
+
+     Returns:
+         str: URL to download file
+     """
+     return f"https://huggingface.co/datasets/RGBD-SOD/test/resolve/main/data/{config_name}/{partition}.zip"
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class Test(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="v1",
+             version=VERSION,
+             description="RGB-D SOD Set 1",
+         ),
+         # datasets.BuilderConfig(
+         #     name="v2",
+         #     version=VERSION,
+         #     description="RGB-D SOD Set 2",
+         # ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "v1"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features(
+                 {
+                     "depth": datasets.Image(),
+                     "rgb": datasets.Image(),
+                     "gt": datasets.Image(),
+                     "name": datasets.Value("string"),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         train_dir = dl_manager.download_and_extract(
+             get_download_url(self.config.name, "train")
+         )
+         dev_dir = dl_manager.download_and_extract(
+             get_download_url(self.config.name, "dev")
+         )
+         # test_dir = dl_manager.download_and_extract(
+         #     get_download_url(self.config.name, "test")
+         # )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"dir_path": train_dir},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"dir_path": dev_dir},
+             ),
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.TEST,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={"dir_path": test_dir},
+             # ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, dir_path: str):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         with open(os.path.join(dir_path, "metadata.json"), "r") as f:
+             json_object = json.load(f)
+
+         metadata: List[Dict[str, str]] = json_object["metadata"]
+
+         for key, row in enumerate(metadata):
+             yield key, {
+                 "name": row["name"],
+                 "rgb": Image.open(os.path.join(dir_path, row["rgb"])).convert("RGB"),
+                 "gt": Image.open(os.path.join(dir_path, row["gt"])).convert("L"),
+                 "depth": Image.open(os.path.join(dir_path, row["depth"])).convert("L"),
+             }
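Once the `train.zip` and `dev.zip` archives are uploaded to the paths that `get_download_url` builds, the builder above can be consumed like any other Hub dataset. A usage sketch, not a definitive recipe: the repo id `RGBD-SOD/test` is taken from the URL inside `get_download_url`, and it is assumed the archives are already in place.

~~~python
# Usage sketch: load the "v1" config defined above and inspect one example.
from datasets import load_dataset

ds = load_dataset("RGBD-SOD/test", "v1")  # repo id taken from get_download_url

example = ds["train"][0]
print(example["name"])   # string identifier
example["rgb"].show()    # RGB input as a PIL image
example["gt"].show()     # grayscale ground-truth saliency mask
example["depth"].show()  # grayscale depth map
~~~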