Datasets:
import json
import os
import re
import shutil
from pathlib import Path

def extract_info_from_filename(filename, attack_type):
    """Extract label, prediction, image type, and base number from a filename."""
    # Extract the label, if the filename encodes one
    label = 'True' if 'labelTrue' in filename else 'False' if 'labelFalse' in filename else None
    # Extract the model prediction, if present; use None (rather than NaN) so the
    # downstream `is not None` check works and dataset.json stays valid JSON
    pred_match = re.search(r'pred(\d+)', filename)
    prediction = int(pred_match.group(1)) if pred_match else None
    # Extract the base image number (digits between an underscore and
    # the next underscore or the extension dot)
    base_num_match = re.search(r'[_](\d+)(?:_|\.)', filename)
    base_num = base_num_match.group(1) if base_num_match else None
    # Determine the image type from the filename prefix
    if filename.startswith('adv_'):
        img_type = 'adversarial'
    elif filename.startswith('orig_'):
        img_type = 'original'
    elif filename.startswith(('perturbation_', 'transformation_')):
        img_type = 'perturbation'
    else:
        img_type = None
    return label, prediction, img_type, base_num
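
# Illustrative example of the extraction above (the filename is hypothetical,
# following the naming scheme the regexes assume):
#   extract_info_from_filename('adv_labelTrue_pred1_42.png', 'fgsm')
#   -> ('True', 1, 'adversarial', '42')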

def create_new_filename(filename, attack_name, base_num):
    """Create a new filename with the attack name and PCam-style numbering."""
    # Split the filename into stem and extension
    name_parts = filename.rsplit('.', 1)
    extension = name_parts[1] if len(name_parts) > 1 else 'png'
    if filename.startswith(('perturbation_', 'transformation_')):
        prefix = 'perturbation_' if filename.startswith('perturbation_') else 'transformation_'
        return f"{prefix}{base_num}_{attack_name}.{extension}"
    elif filename.startswith('adv_'):
        return f"adv_{base_num}_{attack_name}.{extension}"
    elif filename.startswith('orig_'):
        return f"orig_{base_num}_{attack_name}.{extension}"
    return filename
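
# Illustrative example (hypothetical filename, with 'fgsm' as the attack name):
#   create_new_filename('adv_labelTrue_pred1_42.png', 'fgsm', '42')
#   -> 'adv_42_fgsm.png'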

def determine_attack_category(path):
    """Determine whether the attack is black box or non black box based on its path."""
    path_str = str(path).lower()
    # Check the longer name first: "non_black_box_attacks" contains
    # "black_box_attacks" as a substring, so testing in the opposite order
    # would misclassify every non-black-box path
    if "non_black_box_attacks" in path_str:
        return "non_black_box_attacks"
    elif "black_box_attacks" in path_str:
        return "black_box_attacks"
    return None
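
# Illustrative example (hypothetical path):
#   determine_attack_category('data/non_black_box_attacks/pgd/adv_1.png')
#   -> 'non_black_box_attacks'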

def organize_dataset(base_path, cleanup_original=False):
    """Organize the dataset into a PCam-style structure with only a train split."""
    base_path = Path(base_path)
    # Create the output directory tree in PCam style
    output_base = base_path / "organized_dataset"
    labels = ['0', '1']  # PCam uses 0/1 instead of False/True
    for label in labels:
        (output_base / 'train' / label).mkdir(parents=True, exist_ok=True)
    # Create the perturbations and originals directories
    (output_base / 'perturbations').mkdir(parents=True, exist_ok=True)
    (output_base / 'originals').mkdir(parents=True, exist_ok=True)
    # Collected dataset entries, and files grouped by (base number, attack, category)
    dataset_entries = []
    file_groups = {}
    # Walk the input tree and group the image files
    for root, _, files in os.walk(base_path):
        # Skip the output directory so reruns do not re-ingest organized files
        if 'organized_dataset' in Path(root).parts:
            continue
        for file in files:
            if file.endswith(('.png', '.jpg', '.jpeg')):
                full_path = Path(root) / file
                # Determine the attack category (black box vs. non black box)
                attack_category = determine_attack_category(full_path)
                if not attack_category:
                    continue
                # The attack type is the immediate parent directory name
                attack_type = full_path.parent.name
                if attack_type in ['black_box_attacks', 'non_black_box_attacks']:
                    continue
                # Extract file information from the filename
                label, prediction, img_type, base_num = extract_info_from_filename(file, attack_type)
                if base_num:
                    key = (base_num, attack_type, attack_category)
                    file_groups.setdefault(key, []).append((full_path, label, prediction, img_type))
    # Process each group of files
    for key, files in file_groups.items():
        base_num, attack_type, attack_category = key
        entry = {
            "attack": attack_type,
            "type": attack_category,
            "perturbation": None,
            "adversarial": None,
            "original": [],
            "label": None,
            "prediction": None
        }
        # First pass: take the label and prediction from the adversarial example
        for file_path, label, prediction, img_type in files:
            if img_type == 'adversarial' and label:
                entry["label"] = 1 if label == "True" else 0
                entry["prediction"] = prediction
                break
        if entry["label"] is None:
            continue
        # Second pass: copy the files into the organized structure
        label_str = str(entry["label"])
        dest_folder = output_base / 'train' / label_str
        for file_path, _, _, img_type in files:
            if img_type is None:
                continue  # skip files with an unrecognized prefix
            old_filename = file_path.name
            new_filename = create_new_filename(old_filename, attack_type, base_num)
            # Choose the destination folder and relative path by image type
            if img_type == 'perturbation':
                dest = output_base / 'perturbations'
                rel_path = f"perturbations/{new_filename}"
            elif img_type == 'original':
                dest = output_base / 'originals'
                rel_path = f"originals/{new_filename}"
            else:  # adversarial images go into the train folders
                dest = dest_folder
                rel_path = f"train/{label_str}/{new_filename}"
            # Copy the file to the appropriate folder
            shutil.copy2(file_path, dest / new_filename)
            if img_type == 'perturbation':
                entry["perturbation"] = rel_path
            elif img_type == 'adversarial':
                entry["adversarial"] = rel_path
            elif img_type == 'original':
                entry["original"].append(rel_path)
        # Keep only entries that reference at least one image
        if entry["perturbation"] or entry["adversarial"] or entry["original"]:
            dataset_entries.append(entry)
    # Build a Hugging Face compatible dataset.json
    hf_dataset = {
        "train": {
            "features": {
                "image_path": {"dtype": "string", "_type": "Value"},
                "label": {"dtype": "int64", "_type": "Value"},
                "prediction": {"dtype": "int64", "_type": "Value"},
                "attack": {"dtype": "string", "_type": "Value"},
                "attack_type": {"dtype": "string", "_type": "Value"},
                "perturbation_path": {"dtype": "string", "_type": "Value"},
                "original_path": {"dtype": "string", "_type": "Value"}
            },
            "rows": []
        }
    }
    # Convert the entries to the Hugging Face row format
    for entry in dataset_entries:
        if entry["adversarial"]:  # only include entries that have an adversarial image
            hf_entry = {
                "image_path": entry["adversarial"],
                "label": entry["label"],
                "prediction": entry["prediction"] if entry["prediction"] is not None else -1,
                "attack": entry["attack"],
                "attack_type": entry["type"],
                "perturbation_path": entry["perturbation"] if entry["perturbation"] else "",
                "original_path": entry["original"][0] if entry["original"] else ""
            }
            hf_dataset["train"]["rows"].append(hf_entry)
    # Save the Hugging Face compatible dataset.json
    with open(output_base / "dataset.json", 'w') as f:
        json.dump(hf_dataset, f, indent=4)
    # If cleanup was requested and everything succeeded, delete the source folders
    if cleanup_original:
        print("Cleaning up original files...")
        for folder in ['black_box_attacks', 'non_black_box_attacks']:
            folder_path = base_path / folder
            if folder_path.exists():
                shutil.rmtree(folder_path)
                print(f"Deleted {folder}")
    return output_base

if __name__ == "__main__":
    # Ask the user whether the original files should be deleted afterwards
    cleanup = input("Do you want to delete original files after organization? (yes/no): ").lower() == 'yes'
    # The script works relative to its own location
    script_dir = Path(__file__).parent
    output_path = organize_dataset(script_dir, cleanup)
    print(f"Dataset organized and saved to: {output_path}")