import argparse
import random
import os
# Read arguments
parser = argparse.ArgumentParser(description="Generate test and val splits for the Facets-OOD-detection dataset")
parser.add_argument("-t", "--threshold", default="1", help="Can be 1 or 2: images (from the SUN397 dataset) that have OODness lower or equal to the threshold are considered in-distribution")
parser.add_argument("-v", "--val_perc", default="0.5", help="Percentage (as ratio, between 0 and 1) of images to be used as validation set. Remaining images are used as test set")
parser.add_argument("-s", "--seed", default="1234", help="PRNG seed used to generate random numbers")
args = parser.parse_args()
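# Example invocation (all arguments are optional and default to the values above):
#   python <this_script> -t 1 -v 0.5 -s 1234
# The script writes facets_ood_val_t<threshold>.txt and facets_ood_test_t<threshold>.txt to the current directory.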
datasets = ['places_val', 'sun', 'in_val', 'in_train']
ID = 0
OOD = 1
threshold = int(args.threshold)
val_perc = float(args.val_perc)
random.seed(int(args.seed))
counters = [0, 0]
# Stratified sampling to obtain test and val splits of the Facets OOD-detection dataset
# Output format (one image per line): oodness (0=ID / 1=OOD), dataset name (one of the entries in `datasets` above), class_index (dataset dependent), image_path (relative to the dataset folder)
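# Example line (illustrative values only): "0 places_val 165 Places365_val_00000001.jpg"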
# Code is largely improvable (remove duplicated code and reduce complexity)
# Create output files
with open("facets_ood_val_t" + args.threshold + ".txt", "w") as f_val:
with open("facets_ood_test_t" + args.threshold + ".txt", "w") as f_test:
# Places365-Standard validation set
with open("../places365-standard-small/places365_val.txt", "r") as places_val_list:
places_val = {}
for line in places_val_list:
fields = line.split()
if not fields[1] in places_val:
places_val[fields[1]] = []
places_val[fields[1]].append(fields[0])
for class_index, images in places_val.items():
output_line = str(ID) + " places_val " + class_index + " "
class_count = len(images)
val_num = round(class_count * val_perc)
# flags = random.sample([True, False], counts=[val_num, class_count-val_num], k=class_count) # Python 3.9+
flags = random.sample([True for _ in range(val_num)] + [False for _ in range(class_count - val_num)], k=class_count)
for i in range(class_count):
if flags[i]:
f_val.write(output_line + images[i] + "\n")
else:
f_test.write(output_line + images[i] + "\n")
counters[ID] += 1
        # sun
        for oodness in range(4):
            sun = {}
            with open("sun_oodness_" + str(oodness) + ".txt", "r") as f:
                for line in f:
                    fields = line.split()
                    if not fields[0] in sun:
                        sun[fields[0]] = []
                    sun[fields[0]].append(fields[1])
            for class_index, images in sun.items():
                if oodness <= threshold:
                    split_oodness = ID
                else:
                    split_oodness = OOD
                output_line = str(split_oodness) + " sun " + str(class_index) + " "
                class_count = len(images)
                val_num = round(class_count * val_perc)
                flags = random.sample([True for _ in range(val_num)] + [False for _ in range(class_count - val_num)], k=class_count)
                for i in range(class_count):
                    if flags[i]:
                        f_val.write(output_line + images[i] + "\n")
                    else:
                        f_test.write(output_line + images[i] + "\n")
                    counters[split_oodness] += 1
        # ImageNet val
        with open("imagenet_val_oodness.txt", "r") as f:
            imagenet_val = {}
            for line in f:
                fields = line.split()
                if not fields[0] in imagenet_val:
                    imagenet_val[fields[0]] = []
                imagenet_val[fields[0]].append((fields[1], ID if fields[2] == "0" else OOD))
            for class_index, images in imagenet_val.items():
                class_count = len(images)
                val_num = round(class_count * val_perc)
                flags = random.sample([True for _ in range(val_num)] + [False for _ in range(class_count - val_num)], k=class_count)
                for i in range(class_count):
                    if flags[i]:
                        f_val.write(str(images[i][1]) + " in_val " + str(class_index) + " " + images[i][0] + "\n")
                    else:
                        f_test.write(str(images[i][1]) + " in_val " + str(class_index) + " " + images[i][0] + "\n")
                    counters[images[i][1]] += 1
        # ImageNet train
        # Improvable code
        missing_ood_samples = counters[ID] - counters[OOD]
        if missing_ood_samples > 0:
            imagenet_synsets = []
            imagenet_train_dir = "../imagenet2012/ILSVRC/Data/CLS-LOC/train/"
            imagenet = {}
            with open("../imagenet2012/LOC_synset_mapping.txt", "r") as f:
                for line in f:
                    imagenet_synsets.append(line.split()[0])
            with open("ImageNet_OOD_classes.txt", "r") as f:
                for line in f:
                    class_index = int(line.split(":")[0])
                    synset = imagenet_synsets[class_index]
                    imagenet[str(class_index)] = synset
            ood_classes = len(imagenet.keys())
            samples_per_class = missing_ood_samples // ood_classes
            remainder = missing_ood_samples % ood_classes
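            # The missing OOD samples are spread evenly over the OOD classes: each class contributes
            # samples_per_class images, and the first `remainder` classes contribute one extra.
            # Illustrative numbers: 900 missing samples over 200 classes -> 4 per class,
            # with the first 100 classes contributing 5.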
            for class_index, synset in imagenet.items():
                class_images = os.listdir(imagenet_train_dir + synset)
                sample = random.sample(class_images, k=samples_per_class if remainder == 0 else samples_per_class + 1)
                if remainder > 0:
                    remainder -= 1
                # Split between test and val
                class_tot = len(sample)
                val_count = round(class_tot * val_perc)
                flags = random.sample([True for _ in range(val_count)] + [False for _ in range(class_tot - val_count)], k=class_tot)
                output_line = str(OOD) + " in_train " + class_index + " " + synset + "/"
                for i in range(class_tot):
                    if flags[i]:
                        f_val.write(output_line + sample[i] + "\n")
                    else:
                        f_test.write(output_line + sample[i] + "\n")
                    counters[OOD] += 1
print("Tot id samples:", counters[ID])
print("Tot ood samples:", counters[OOD])