Yiwei-Ou committed
Commit 3da7bcd · verified · 1 Parent(s): 277ce0a

Upload dataset_script.py

Files changed (1):
  1. dataset_script.py  +136 -0
dataset_script.py ADDED
@@ -0,0 +1,136 @@
import os
import pandas as pd
from pathlib import Path
import datasets

_CITATION = """
@inproceedings{your_neurips_submission,
  title={Multimodal Street-level Place Recognition Dataset},
  author={Ou, Yiwei},
  year={2025},
  booktitle={NeurIPS Datasets and Benchmarks Track}
}
"""

_DESCRIPTION = """
Multimodal Street-level Place Recognition Dataset (Resized version).
This version loads images, videos, and associated annotations for place recognition tasks,
including GPS, camera metadata, and temporal information.
"""

_HOMEPAGE = "https://huggingface.co/datasets/Yiwei-Ou/Multimodal_Street-level_Place_Recognition_Dataset"
_LICENSE = "cc-by-4.0"


class MultimodalPlaceRecognition(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image_path": datasets.Value("string"),
                "video_path": datasets.Value("string"),
                "location_code": datasets.Value("string"),
                "spatial_type": datasets.Value("string"),
                "index": datasets.Value("int32"),
                "shop_names": datasets.Value("string"),
                "sign_text": datasets.Value("string"),
                "image_metadata": datasets.Value("string"),
                "video_metadata": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the resized, annotated archive from the Hub.
        archive_path = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/Yiwei-Ou/Multimodal_Street-level_Place_Recognition_Dataset/resolve/main/Annotated_Resized.tar.gz"
        )
        base_dir = os.path.join(archive_path, "03 Annotated_Resized", "Dataset_Full")
        image_dir = os.path.join(base_dir, "Images")
        video_dir = os.path.join(base_dir, "Videos")
        text_dir = os.path.join(base_dir, "Texts")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "image_dir": image_dir,
                    "video_dir": video_dir,
                    "annotations_path": os.path.join(text_dir, "Annotations.xlsx"),
                    "image_meta_path": os.path.join(text_dir, "Media_Metadata-Images.xlsx"),
                    "video_meta_path": os.path.join(text_dir, "Media_Metadata-Videos.xlsx"),
                },
            )
        ]

    def _generate_examples(self, image_dir, video_dir, annotations_path, image_meta_path, video_meta_path):
        id_ = 0

        # Per-location annotations keyed by location code.
        annotations_df = pd.read_excel(annotations_path, engine="openpyxl")
        annotations_dict = {
            str(row["Code"]).strip(): {
                "spatial_type": str(row["Type"]).strip(),
                "index": int(row["Index"]),
                "shop_names": str(row["List of Store Names and Signs"]) if not pd.isna(row["List of Store Names and Signs"]) else "",
                "sign_text": "",  # Not explicitly available
            }
            for _, row in annotations_df.iterrows()
        }

        # Per-file image and video metadata keyed by filename.
        image_meta_df = pd.read_excel(image_meta_path, engine="openpyxl")
        image_meta_dict = {
            str(row["Filename"]).strip(): row.drop("Filename").dropna().to_dict()
            for _, row in image_meta_df.iterrows()
        }

        video_meta_df = pd.read_excel(video_meta_path, engine="openpyxl")
        video_meta_dict = {
            str(row["Filename"]).strip(): row.drop("Filename").dropna().to_dict()
            for _, row in video_meta_df.iterrows()
        }

        # Walk Images/<spatial_type>/<location_code>/ and pair each image with a matching video, if any.
        for spatial_type in os.listdir(image_dir):
            spatial_path = os.path.join(image_dir, spatial_type)
            if not os.path.isdir(spatial_path):
                continue
            for location_code in os.listdir(spatial_path):
                loc_img_path = os.path.join(spatial_path, location_code)
                if not os.path.isdir(loc_img_path):
                    continue

                loc_vid_path = os.path.join(video_dir, spatial_type, location_code) if os.path.exists(os.path.join(video_dir, spatial_type, location_code)) else None
                vid_files = set(os.listdir(loc_vid_path)) if loc_vid_path else set()

                for file_name in os.listdir(loc_img_path):
                    if file_name.lower().endswith((".jpg", ".jpeg", ".png")):
                        base_name = os.path.splitext(file_name)[0]
                        # A video matches if it shares the image's basename.
                        video_match = [v for v in vid_files if v.startswith(base_name) and v.endswith(".mp4")]
                        video_file = video_match[0] if video_match else ""
                        video_path = os.path.join(loc_vid_path, video_file) if video_file else ""

                        meta = annotations_dict.get(location_code, {
                            "spatial_type": spatial_type,
                            "index": -1,
                            "shop_names": "",
                            "sign_text": "",
                        })

                        img_meta = image_meta_dict.get(file_name, {})
                        vid_meta = video_meta_dict.get(video_file, {}) if video_file else {}

                        yield id_, {
                            "image_path": os.path.join(loc_img_path, file_name),
                            "video_path": video_path,
                            "location_code": location_code,
                            "spatial_type": meta["spatial_type"],
                            "index": meta["index"],
                            "shop_names": meta["shop_names"],
                            "sign_text": meta["sign_text"],
                            "image_metadata": str(img_meta),
                            "video_metadata": str(vid_meta),
                        }
                        id_ += 1
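
For quick verification, a minimal loading sketch (assuming this file is picked up as the repository's loading script; the repo id follows _HOMEPAGE above, and trust_remote_code is needed for script-based datasets in recent `datasets` releases):

    from datasets import load_dataset

    # Hypothetical quick check: load the train split defined by the script above.
    ds = load_dataset(
        "Yiwei-Ou/Multimodal_Street-level_Place_Recognition_Dataset",
        split="train",
        trust_remote_code=True,  # required for dataset scripts in recent `datasets` versions
    )
    print(ds[0]["image_path"], ds[0]["location_code"])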