import argparse
import random
import os

# Read arguments
parser = argparse.ArgumentParser(description="Generate the test and validation splits for inter-dataset OOD detection")
parser.add_argument("-v", "--val_perc", type=float, default=0.5, help="Fraction (between 0 and 1) of images assigned to the validation split; the remaining images form the test split")
parser.add_argument("-s", "--seed", type=int, default=42, help="PRNG seed used to generate random numbers")
args = parser.parse_args()

datasets = ['places_val', 'in_train']  # in-distribution / out-of-distribution sources
ID  = 0
OOD = 1
val_perc = args.val_perc
random.seed(args.seed)

counters = [0, 0]  # samples written so far: counters[ID], counters[OOD]

# Stratified sampling to obtain the test and val splits
# Output format (space-separated): oodness (0 = ID, 1 = OOD), dataset name, class index (dataset dependent), image path (relative to the dataset folder)
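# For illustration, the output lines look roughly like this (class indices and
# file names below are made-up placeholders, not taken from the real lists):
#   0 places_val 165 Places365_val_00000011.jpg
#   1 in_train 7 n01440764/n01440764_10026.JPEG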


# Create output files
with open("inter_dataset_val.txt", "w") as f_val:
    with open("inter_dataset_test.txt", "w") as f_test:

        # Places365-Standard validation set

        with open("../places365-standard-small/places365_val.txt", "r") as places_val_list:

            # Map each Places365 class label to the list of its validation image file names
            places_val = {}

            for line in places_val_list:

                fields = line.split()  # fields[0] = image file name, fields[1] = class label

                if fields[1] not in places_val:
                    places_val[fields[1]] = []

                places_val[fields[1]].append(fields[0])


        for class_index, images in places_val.items():

            output_line = str(ID) + " places_val " + class_index + " "

            class_count = len(images)
            val_num     = round(class_count * val_perc)

            # Shuffle a boolean mask so that exactly val_num images end up in the validation split
            # flags = random.sample([True, False], counts=[val_num, class_count - val_num], k=class_count)  # Python 3.9+
            flags = random.sample([True] * val_num + [False] * (class_count - val_num), k=class_count)

            for i in range(class_count):

                if flags[i]:
                    f_val.write(output_line + images[i] + "\n")
                else:
                    f_test.write(output_line + images[i] + "\n")

                counters[ID] += 1



        # ImageNet train set: draw enough OOD samples to balance the number of ID samples
        # (admittedly improvable code)

        missing_ood_samples = counters[ID] - counters[OOD]

        if missing_ood_samples > 0:

            imagenet_synsets   = []
            imagenet_train_dir = "../imagenet2012/ILSVRC/Data/CLS-LOC/train/"
            imagenet = {}  # class index (as string) -> synset id

            with open("../imagenet2012/LOC_synset_mapping.txt", "r") as f:

                for line in f:
                    imagenet_synsets.append(line.split()[0])

            with open("ImageNet_only_classes.txt", "r") as f:

                for line in f:

                    class_index = int(line.split(":")[0])
                    synset = imagenet_synsets[class_index]
                    imagenet[str(class_index)] = synset

            ood_classes       = len(imagenet)
            samples_per_class = missing_ood_samples // ood_classes
            remainder         = missing_ood_samples % ood_classes
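            # For illustration (made-up numbers): with 1000 missing OOD samples and 3 OOD classes,
            # samples_per_class = 333 and remainder = 1, so one class contributes 334 images.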

            for class_index, synset in imagenet.items():

                # Draw this class's quota; the first `remainder` classes take one extra image
                class_images = os.listdir(imagenet_train_dir + synset)
                sample = random.sample(class_images, k=samples_per_class + 1 if remainder > 0 else samples_per_class)

                if remainder > 0:
                    remainder -= 1
                
                # Split between test and val
                class_tot = len(sample)
                val_count = round(class_tot * val_perc)

                flags = random.sample([True] * val_count + [False] * (class_tot - val_count), k=class_tot)

                output_line = str(OOD) + " in_train " + class_index + " " + synset + "/"

                for i in range(class_tot):
                    
                    if flags[i]:
                        f_val.write(output_line + sample[i] + "\n")
                    else:
                        f_test.write(output_line + sample[i] + "\n")

                    counters[OOD] += 1



print("Tot id samples:", counters[ID])
print("Tot ood samples:", counters[OOD])