"""VALERIE22 dataset""" |
|
|
|
import os |
|
import json |
|
import glob |
|
|
|
import datasets |
|
|
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/Intel/VALERIE22"

_LICENSE = "Creative Commons — CC0 1.0 Universal"

_CITATION = """\
tba
"""

_DESCRIPTION = """\
The VALERIE22 dataset was generated with the VALERIE procedural tools pipeline, which provides a photorealistic sensor simulation rendered from automatically synthesized scenes. The dataset ships with a uniquely rich set of metadata that allows specific scene and semantic features to be extracted, such as pixel-accurate occlusion rates, positions in the scene, and the distance and angle to the camera. This enables a multitude of tests on the data, and we hope it stimulates research on understanding the performance of DNNs.
"""
|
_REPO = "https://huggingface.co/datasets/Intel/VALERIE22/resolve/main" |
|
_SEQUENCES = {
    "train": [
        "intel_results_sequence_0057.zip",
        "intel_results_sequence_0058.zip",
        "intel_results_sequence_0059.zip",
        "intel_results_sequence_0060.zip",
        "intel_results_sequence_0062_part1.zip",
        "intel_results_sequence_0062_part2.zip",
    ],
    "validation": ["intel_results_sequence_0062_part1.zip", "intel_results_sequence_0062_part2.zip"],
    "test": ["intel_results_sequence_0062_part1.zip", "intel_results_sequence_0062_part2.zip"],
}

_URLS = {split: [f"{_REPO}/data/{sequence}" for sequence in sequences] for split, sequences in _SEQUENCES.items()}
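# Each entry resolves to a direct download link, e.g.
# "https://huggingface.co/datasets/Intel/VALERIE22/resolve/main/data/intel_results_sequence_0057.zip".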
|
def _person_features():
    """Feature schema shared by `persons_png` and `persons_png_distorted`."""
    return {
        # 2D boxes are stored as [c_x, c_y, w, h] (box center plus width/height),
        # matching the keys in the 2d-bounding-box ground-truth JSON files.
        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
        "bbox_vis": datasets.Sequence(datasets.Value("float32"), length=4),
        "occlusion": datasets.Value("float32"),
        "distance": datasets.Value("float32"),
        "v_x": datasets.Value("float32"),
        "v_y": datasets.Value("float32"),
        "truncated": datasets.Value("bool"),
        "total_pixels_object": datasets.Value("float32"),
        "total_visible_pixels_object": datasets.Value("float32"),
        "contrast_rgb_full": datasets.Value("float32"),
        "contrast_edge": datasets.Value("float32"),
        "contrast_rgb": datasets.Value("float32"),
        "luminance": datasets.Value("float32"),
        "perceived_lightness": datasets.Value("float32"),
        # 3D box as [center_x, center_y, center_z, size_x, size_y, size_z].
        "3dbbox": datasets.Sequence(datasets.Value("float32"), length=6),
    }


class VALERIE22(datasets.GeneratorBasedBuilder):
    """VALERIE22 dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "image_distorted": datasets.Image(),
                    "persons_png": datasets.Sequence(_person_features()),
                    "persons_png_distorted": datasets.Sequence(_person_features()),
                    "semantic_group_segmentation": datasets.Image(),
                    "semantic_instance_segmentation": datasets.Image(),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train", "data_dirs": data_dir["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": "test", "data_dirs": data_dir["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "validation", "data_dirs": data_dir["validation"]},
            ),
        ]
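    # Note: `download_and_extract` is handed lists here, so `data_dir[split]` is a
    # list of extraction directories in the same order as `_SEQUENCES[split]`;
    # `_generate_examples` relies on that ordering when pairing them via `zip`.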
|
    def _generate_examples(self, split, data_dirs):
        sequence_dirs = []
        sequence_0062_part2_dir = None
        for data_dir, sequence in zip(data_dirs, _SEQUENCES[split]):
            sequence = sequence.replace(".zip", "")
            if "_part1" in sequence:
                # Part 1 of sequence 0062 extracts to the plain sequence folder.
                sequence = sequence.replace("_part1", "")
            if "_part2" in sequence:
                # Part 2 of sequence 0062 extracts to a "..._b" folder, which
                # provides the distorted images; remember it instead of treating
                # it as a sequence of its own.
                sequence_0062_part2_dir = os.path.join(data_dir, sequence.replace("_part2", "_b"))
                continue
            sequence_dirs.append(os.path.join(data_dir, sequence))

        def person_entry(bb, threed):
            """Flatten one person's 2D ground truth plus its 3D bounding box."""
            return {
                "bbox": [bb["bb"]["c_x"], bb["bb"]["c_y"], bb["bb"]["w"], bb["bb"]["h"]],
                "bbox_vis": [bb["bb_vis"]["c_x"], bb["bb_vis"]["c_y"], bb["bb_vis"]["w"], bb["bb_vis"]["h"]],
                "occlusion": bb["occlusion"],
                "distance": bb["distance"],
                "v_x": bb["v_x"],
                "v_y": bb["v_y"],
                "truncated": bb["truncated"],
                "total_pixels_object": bb["total_pixels_object"],
                "total_visible_pixels_object": bb["total_visible_pixels_object"],
                "contrast_rgb_full": bb["contrast_rgb_full"],
                "contrast_edge": bb["contrast_edge"],
                "contrast_rgb": bb["contrast_rgb"],
                "luminance": bb["luminance"],
                "perceived_lightness": bb["perceived_lightness"],
                "3dbbox": [threed["center"][0], threed["center"][1], threed["center"][2],
                           threed["size"][0], threed["size"][1], threed["size"][2]],
            }

        idx = 0
        for sequence_dir in sequence_dirs:
            for filename in glob.glob(os.path.join(sequence_dir, "sensor/camera/left/png", "*.png")):
                image_file_path = filename

                # For sequence 0062 the distorted images live in the part-2
                # archive; for all other sequences they sit next to the clean
                # images in a png_distorted folder.
                if "_0062" in sequence_dir:
                    image_distorted_file_path = os.path.join(sequence_0062_part2_dir, "sensor/camera/left/png_distorted/", os.path.basename(filename))
                else:
                    image_distorted_file_path = filename.replace("/png/", "/png_distorted/")

                # All ground-truth files mirror the camera path under ground-truth/.
                persons_png_path = filename.replace("sensor/camera/left/png/", "ground-truth/2d-bounding-box_json/")
                persons_distorted_png_path = filename.replace("sensor/camera/left/png/", "ground-truth/2d-bounding-box_json_png_distorted/")
                semantic_group_segmentation_file_path = filename.replace("sensor/camera/left/png/", "ground-truth/semantic-group-segmentation_png/")
                semantic_instance_segmentation_file_path = filename.replace("sensor/camera/left/png/", "ground-truth/semantic-instance-segmentation_png/")

                # Skip frames that are missing any image or annotation file.
                if not (
                    os.path.isfile(image_file_path)
                    and os.path.isfile(image_distorted_file_path)
                    and os.path.isfile(persons_png_path.replace(".png", ".json"))
                    and os.path.isfile(persons_distorted_png_path.replace(".png", ".json"))
                    and os.path.isfile(semantic_group_segmentation_file_path)
                    and os.path.isfile(semantic_instance_segmentation_file_path)
                ):
                    continue

                with open(persons_png_path.replace(".png", ".json"), "r") as json_file:
                    bb_person_json = json.load(json_file)

                with open(persons_distorted_png_path.replace(".png", ".json"), "r") as json_file:
                    bb_person_distorted_json = json.load(json_file)

                # The 3D boxes are shared between the clean and distorted views.
                threed_bb_person_path = filename.replace("sensor/camera/left/png/", "ground-truth/3d-bounding-box_json/")
                with open(threed_bb_person_path.replace(".png", ".json"), "r") as json_file:
                    threed_bb_person_json = json.load(json_file)

                persons_png = []
                persons_png_distorted = []
                for key in bb_person_json:
                    persons_png.append(person_entry(bb_person_json[key], threed_bb_person_json[key]))
                    persons_png_distorted.append(person_entry(bb_person_distorted_json[key], threed_bb_person_json[key]))

                yield idx, {
                    "image": image_file_path,
                    "image_distorted": image_distorted_file_path,
                    "persons_png": persons_png,
                    "persons_png_distorted": persons_png_distorted,
                    "semantic_group_segmentation": semantic_group_segmentation_file_path,
                    "semantic_instance_segmentation": semantic_instance_segmentation_file_path,
                }
                idx += 1
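if __name__ == "__main__":
    # Minimal local smoke test (an illustration only, not part of the loader):
    # download and prepare this builder directly, then print the validation
    # split. Note this fetches several large archives from the Hub.
    builder = VALERIE22()
    builder.download_and_prepare()
    print(builder.as_dataset(split="validation"))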