ckadirt committed · verified
Commit 0a8b79b · 1 Parent(s): 112faa1

Add files using upload-large-folder tool

Files changed (50)
  1. .gitattributes +13 -59
  2. .gitignore +66 -0
  3. MindEye2.py +102 -0
  4. accel-multi.slurm +41 -0
  5. accel.slurm +41 -0
  6. all_stimuli/MST_pairs1/pair_10_14_33.png +3 -0
  7. all_stimuli/MST_pairs1/pair_40_w_escalator1.jpg +3 -0
  8. all_stimuli/MST_pairs1/pair_44_w_log_cabin1.jpg +3 -0
  9. all_stimuli/MST_pairs1/pair_48_w_runway1.jpg +3 -0
  10. all_stimuli/MST_pairs1/pair_48_w_runway2.jpg +3 -0
  11. check_union_mask.ipynb +0 -0
  12. compare_protocols.ipynb +125 -0
  13. enhanced_recon_inference.ipynb +460 -0
  14. environment.yml +322 -0
  15. evals/testing_MST_ViT-H_1024/final_evals +12 -0
  16. evals/testing_MST_ViT-H_1024/testing_MST_ViT-H_1024_MST_ID.npy +3 -0
  17. evals/testing_MST_ViT-H_1024/testing_MST_ViT-H_1024_MST_pairmate_indices.npy +3 -0
  18. evals/testing_MST_ViT-H_1024_MST_ID.npy +3 -0
  19. evals/testing_MST_ViT-H_1024_scalf/final_evals +12 -0
  20. evals/testing_MST_ViT-H_1024_scalf/testing_MST_ViT-H_1024_scalf_MST_pairmate_indices.npy +3 -0
  21. evals/testing_MST_ViT-H_1024_scalf_best/final_evals +12 -0
  22. evals/testing_MST_ViT-H_1024_scalf_best/testing_MST_ViT-H_1024_scalf_MST_ID.npy +3 -0
  23. evals/testing_MST_ViT-H_1024_scalf_best/testing_MST_ViT-H_1024_scalf_MST_pairmate_indices.npy +3 -0
  24. final_evaluations.ipynb +1778 -0
  25. final_evaluations_sdxl_turbo.ipynb +1837 -0
  26. main-finetune-rt-preproc.ipynb +0 -0
  27. main-finetune-rt-preproc.py +2260 -0
  28. main-finetune.ipynb +0 -0
  29. main-finetune.py +2280 -0
  30. main-multisession-3tasks.ipynb +0 -0
  31. main-multisession-3tasks.py +2269 -0
  32. main-multisession-sub-005_ses-03_union_mask.py +1956 -0
  33. main-multisession-sub-005_ses-03_union_mask_sdxl_turbo.ipynb +0 -0
  34. main-multisession.ipynb +0 -0
  35. main-test.py +1962 -0
  36. main.ipynb +1950 -0
  37. modeling_git.py +2050 -0
  38. models.py +756 -0
  39. recon_inference-multisession-simple.ipynb +0 -0
  40. recon_inference-multisession.ipynb +1689 -0
  41. recon_inference-multisession_union_mask.ipynb +0 -0
  42. recon_inference-multisession_union_mask_sdxl_turbo.ipynb +0 -0
  43. recon_inference.ipynb +0 -0
  44. rt_glmsingle.ipynb +0 -0
  45. run_all_batch.slurm +109 -0
  46. run_all_nb.slurm +64 -0
  47. run_all_workstation.sh +42 -0
  48. run_main_finetune.slurm +90 -0
  49. unit_test.py +234 -0
  50. utils.py +1151 -0
.gitattributes CHANGED
@@ -1,59 +1,13 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
+ *.png,[[:space:]]*.jpg filter=lfs diff=lfs merge=lfs -text
+ main*.ipynb filter=lfs diff=lfs merge=lfs -text
+ all_stimuli/MST_pairs1/pair_10_14_33.png filter=lfs diff=lfs merge=lfs -text
+ evals/testing_MST_ViT-H_1024_scalf_best/testing_MST_ViT-H_1024_scalf_MST_pairmate_indices.npy filter=lfs diff=lfs merge=lfs -text
+ evals/testing_MST_ViT-H_1024_MST_ID.npy filter=lfs diff=lfs merge=lfs -text
+ evals/testing_MST_ViT-H_1024_scalf/testing_MST_ViT-H_1024_scalf_MST_pairmate_indices.npy filter=lfs diff=lfs merge=lfs -text
+ evals/testing_MST_ViT-H_1024/testing_MST_ViT-H_1024_MST_ID.npy filter=lfs diff=lfs merge=lfs -text
+ evals/testing_MST_ViT-H_1024_scalf_best/testing_MST_ViT-H_1024_scalf_MST_ID.npy filter=lfs diff=lfs merge=lfs -text
+ all_stimuli/MST_pairs1/pair_44_w_log_cabin1.jpg filter=lfs diff=lfs merge=lfs -text
+ all_stimuli/MST_pairs1/pair_48_w_runway1.jpg filter=lfs diff=lfs merge=lfs -text
+ all_stimuli/MST_pairs1/pair_40_w_escalator1.jpg filter=lfs diff=lfs merge=lfs -text
+ all_stimuli/MST_pairs1/pair_48_w_runway2.jpg filter=lfs diff=lfs merge=lfs -text
+ evals/testing_MST_ViT-H_1024/testing_MST_ViT-H_1024_MST_pairmate_indices.npy filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,66 @@
+ src/clip_img_embedder
+ src/processor
+ src/clip_text_model
+ src/diffusion_engine
+ raw/
+ src/eval_downloads/
+ src/evals/
+ rt_python/masks
+ rt_python/generative_models
+ rt_python/fine_tuned_model
+ rt_python/data
+ *DS_Store
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ Untitled*.ipynb
+ .ipynb_checkpoints/
+ Train (2).ipynb
+ Train (2).py
+ Train.ipynb
+ accel-more-train.slurm
+ accel-multi-3subj.slurm
+ accel-multi-batch.slurm
+ accel-ses-03.slurm
+ debug/
+ enhanced_recon_inference-ses-03.ipynb
+ enhanced_recon_inference.py
+ enhanced_recon_inference_orig.ipynb
+ enhanced_recon_inference_orig.py
+ evals/
+ figures/
+ final_batch_evals.ipynb
+ final_evaluations-ses-03.ipynb
+ final_evaluations.py
+ final_evaluations_orig.ipynb
+ final_evaluations_orig.py
+ main-more-train.ipynb
+ main-more-train.py
+ main-multisession-3subj.ipynb
+ main-multisession.py
+ main-multisession_old.ipynb
+ main-multisession_old.py
+ main-ses-03.ipynb
+ main-ses-03.py
+ main.py
+ masks/
+ orig_main.ipynb
+ orig_main.py
+ recon_inference-multisession-simple-copy.ipynb
+ recon_inference-multisession-simple-copy.py
+ recon_inference-multisession-simple.ipynb
+ recon_inference-multisession-simple.py
+ recon_inference-multisession.py
+ recon_inference-ses-03.ipynb
+ recon_inference.py
+ recon_inference_orig.ipynb
+ recon_inference_orig.py
+ run_final_evals.slurm
+ run_recon_final.slurm
+ run_recon_final_batch.slurm
+ run_recon_multi.slurm
+ slurms/
+ wandb/
+ wandb_test.ipynb
+ wandb_test.py
+ .run_all_batch.slurm.swp
MindEye2.py ADDED
@@ -0,0 +1,102 @@
+ import torch
+ import torch.nn as nn
+
+ class MindEyeModule(nn.Module):
+     def __init__(self):
+         super(MindEyeModule, self).__init__()
+     def forward(self, x):
+         return x
+
+
+ class RidgeRegression(torch.nn.Module):
+     # make sure to add weight_decay when initializing optimizer
+     def __init__(self, input_sizes, out_features, seq_len=1):
+         super(RidgeRegression, self).__init__()
+         self.seq_len = seq_len
+         self.out_features = out_features
+         self.linears = torch.nn.ModuleList([
+             torch.nn.Linear(input_size, out_features) for input_size in input_sizes
+         ])
+     def forward(self, x, subj_idx=0):
+         out = torch.cat([self.linears[subj_idx](x[:,seq]).unsqueeze(1) for seq in range(self.seq_len)], dim=1)
+         return out
+
+
+ from functools import partial
+
+ class BrainNetwork(nn.Module):
+     def __init__(self, h=4096, in_dim=15724, out_dim=768, seq_len=1, n_blocks=4, drop=.15,
+                  clip_size=768, clip_scale=1):
+         super().__init__()
+         self.seq_len = seq_len
+         self.h = h
+         self.clip_size = clip_size
+         self.clip_scale = clip_scale
+
+         self.mixer_blocks1 = nn.ModuleList([
+             self.mixer_block1(h, drop) for _ in range(n_blocks)
+         ])
+         self.mixer_blocks2 = nn.ModuleList([
+             self.mixer_block2(seq_len, drop) for _ in range(n_blocks)
+         ])
+
+         # Output linear layer
+         self.backbone_linear = nn.Linear(h * seq_len, out_dim, bias=True)
+         if self.clip_scale>0:
+             self.clip_proj = self.projector(clip_size, clip_size, h=clip_size)
+
+     def projector(self, in_dim, out_dim, h=2048):
+         return nn.Sequential(
+             nn.LayerNorm(in_dim),
+             nn.GELU(),
+             nn.Linear(in_dim, h),
+             nn.LayerNorm(h),
+             nn.GELU(),
+             nn.Linear(h, h),
+             nn.LayerNorm(h),
+             nn.GELU(),
+             nn.Linear(h, out_dim)
+         )
+
+     def mlp(self, in_dim, out_dim, drop):
+         return nn.Sequential(
+             nn.Linear(in_dim, out_dim),
+             nn.GELU(),
+             nn.Dropout(drop),
+             nn.Linear(out_dim, out_dim),
+         )
+
+     def mixer_block1(self, h, drop):
+         return nn.Sequential(
+             nn.LayerNorm(h),
+             self.mlp(h, h, drop),  # Token mixing
+         )
+
+     def mixer_block2(self, seq_len, drop):
+         return nn.Sequential(
+             nn.LayerNorm(seq_len),
+             self.mlp(seq_len, seq_len, drop)  # Channel mixing
+         )
+
+     def forward(self, x):
+         # make empty tensors
+         c,b = torch.Tensor([0.]), torch.Tensor([[0.],[0.]])
+
+         # Mixer blocks
+         residual1 = x
+         residual2 = x.permute(0,2,1)
+         for block1, block2 in zip(self.mixer_blocks1,self.mixer_blocks2):
+             x = block1(x) + residual1
+             residual1 = x
+             x = x.permute(0,2,1)
+
+             x = block2(x) + residual2
+             residual2 = x
+             x = x.permute(0,2,1)
+
+         x = x.reshape(x.size(0), -1)
+         backbone = self.backbone_linear(x).reshape(len(x), -1, self.clip_size)
+         if self.clip_scale>0:
+             c = self.clip_proj(backbone)
+
+         return backbone, c, b
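
A minimal usage sketch, not part of the commit: RidgeRegression maps flattened voxels into a shared hidden space and BrainNetwork (a residual MLP-mixer) turns that into a CLIP-space backbone plus a projected embedding. The voxel count is illustrative; the hidden size matches the --hidden_dim=1024 flag used in the slurm scripts below.

import torch
from MindEye2 import RidgeRegression, BrainNetwork

num_voxels = 15724   # illustrative flattened voxel count for one subject (assumption)
hidden_dim = 1024    # matches --hidden_dim=1024 in the launch commands below
seq_len = 1

ridge = RidgeRegression(input_sizes=[num_voxels], out_features=hidden_dim, seq_len=seq_len)
backbone_net = BrainNetwork(h=hidden_dim, in_dim=hidden_dim, out_dim=768,
                            seq_len=seq_len, n_blocks=4, clip_size=768, clip_scale=1)

voxels = torch.randn(24, seq_len, num_voxels)     # (batch, seq, voxels) dummy input
hidden = ridge(voxels, subj_idx=0)                # -> (24, 1, 1024)
backbone, clip_pred, blurry = backbone_net(hidden)
print(backbone.shape, clip_pred.shape)            # torch.Size([24, 1, 768]) torch.Size([24, 1, 768])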
accel-multi.slurm ADDED
@@ -0,0 +1,41 @@
+ #!/bin/bash
+ #SBATCH --job-name=ses-01
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --nodes=1
+ #SBATCH --gres=gpu:1
+ #SBATCH --constraint=gpu80
+ #SBATCH --gpus-per-task=1 # Set to equal gres=gpu:#!
+ #SBATCH --cpus-per-task=40 # 40 / 80 / 176 distributed across node
+ #SBATCH --time=01:20:00 # total run time limit (HH:MM:SS)
+ #SBATCH -e slurms/%j.err # first create a "slurms" folder in current directory to store logs
+ #SBATCH -o slurms/%j.out
+ #SBATCH --no-requeue
+ #SBATCH --array=0-19
+ #SBATCH --mail-type=END
+ #SBATCH [email protected]
+
+ module purge
+ module load anaconda3/2023.3
+ # conda activate rt_mindEye2
+ source /scratch/gpfs/ri4541/MindEyeV2/src/fmri/bin/activate
+
+ # The following line converts your jupyter notebook into a python script runnable with Slurm
+ jupyter nbconvert main-multisession.ipynb --to python
+
+ export NUM_GPUS=1 # Set to equal gres=gpu:#!
+ export BATCH_SIZE=24
+ export GLOBAL_BATCH_SIZE=$((BATCH_SIZE * NUM_GPUS))
+
+ # Make sure another job doesnt use same port, here using random number
+ export MASTER_PORT=$((RANDOM % (19000 - 11000 + 1) + 11000))
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export COUNT_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)
+ echo MASTER_ADDR=${MASTER_ADDR}
+ echo MASTER_PORT=${MASTER_PORT}
+ echo WORLD_SIZE=${COUNT_NODE}
+
+ # singlesubject finetuning
+ model_name="sub-001_ses-01_bs24_MST_rishab_MSTsplit_orig"
+ echo model_name=${model_name}
+ accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT orig_main.py --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-5 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep
accel.slurm ADDED
@@ -0,0 +1,41 @@
+ #!/bin/bash
+ #SBATCH --job-name=ses-01_orig
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --nodes=1
+ #SBATCH --gres=gpu:1
+ #SBATCH --constraint=gpu80
+ #SBATCH --gpus-per-task=1 # Set to equal gres=gpu:#!
+ #SBATCH --cpus-per-task=40 # 40 / 80 / 176 distributed across node
+ #SBATCH --time=01:20:00 # total run time limit (HH:MM:SS)
+ #SBATCH -e slurms/%j.err # first create a "slurms" folder in current directory to store logs
+ #SBATCH -o slurms/%j.out
+ #SBATCH --no-requeue
+ #SBATCH --array=0
+ #SBATCH --mail-type=END
+ #SBATCH [email protected]
+
+ module load anaconda3/2023.3
+ conda activate rt_mindEye2
+
+ # The following line converts your jupyter notebook into a python script runnable with Slurm
+ jupyter nbconvert orig_main.ipynb --to python
+
+ export NUM_GPUS=1 # Set to equal gres=gpu:#!
+ export BATCH_SIZE=24
+ export GLOBAL_BATCH_SIZE=$((BATCH_SIZE * NUM_GPUS))
+
+ # Make sure another job doesnt use same port, here using random number
+ export MASTER_PORT=$((RANDOM % (19000 - 11000 + 1) + 11000))
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export COUNT_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)
+ echo MASTER_ADDR=${MASTER_ADDR}
+ echo MASTER_PORT=${MASTER_PORT}
+ echo WORLD_SIZE=${COUNT_NODE}
+
+ # singlesubject finetuning
+ model_name="sub-001_ses-01_bs24_MST_original"
+ echo model_name=${model_name}
+ accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT orig_main.py --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-4 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep
+
+ #accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT Train.py --data_path=/weka/proj-fmri/shared/mindeyev2_dataset --cache_dir=/weka/proj-fmri/shared/cache --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-4 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --wandb_log --multisubject_ckpt=../train_logs/multisubject_excludingsubj01_40sess
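
The --no-multi_subject, --no-blurry_recon, and --no-use_image_aug flags passed to orig_main.py above are the paired negative forms argparse can generate for boolean options. As an illustration only (orig_main.py is not shown in this commit and may define its flags differently), such flags can be declared with BooleanOptionalAction on Python 3.9+:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--multi_subject", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--blurry_recon", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--batch_size", type=int, default=24)
parser.add_argument("--max_lr", type=float, default=3e-4)

# mirrors part of the command line above (hypothetical defaults)
args = parser.parse_args(["--no-multi_subject", "--no-blurry_recon",
                          "--batch_size", "24", "--max_lr", "3e-5"])
print(args.multi_subject, args.blurry_recon, args.max_lr)  # False False 3e-05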
all_stimuli/MST_pairs1/pair_10_14_33.png ADDED

Git LFS Details

  • SHA256: 5ec8c0e610ff64dd44275135e82db8d1c83df28e939744373549e584238865b0
  • Pointer size: 130 Bytes
  • Size of remote file: 83.9 kB
all_stimuli/MST_pairs1/pair_40_w_escalator1.jpg ADDED

Git LFS Details

  • SHA256: d8583aa560afbbcb26cec7fba3efa1ff44a3253e8504595d1e5fe13bc51ae7c7
  • Pointer size: 130 Bytes
  • Size of remote file: 37 kB
all_stimuli/MST_pairs1/pair_44_w_log_cabin1.jpg ADDED

Git LFS Details

  • SHA256: 313e73b9baf517d3396f32a79dfbe59e933fd304ad187e0c3282f8b5e1904543
  • Pointer size: 130 Bytes
  • Size of remote file: 38.5 kB
all_stimuli/MST_pairs1/pair_48_w_runway1.jpg ADDED

Git LFS Details

  • SHA256: f427e8ae0e6c6d8a168e9c1d6b0a505cdebf55607e5922cad4d678d4f4f43593
  • Pointer size: 130 Bytes
  • Size of remote file: 26.1 kB
all_stimuli/MST_pairs1/pair_48_w_runway2.jpg ADDED

Git LFS Details

  • SHA256: 7667b25851179391af46d17eb2d74a5b3a11a1c3802d4c44175d835fb22cfa80
  • Pointer size: 130 Bytes
  • Size of remote file: 23 kB
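
The five stimulus images above are stored as Git LFS pointers, so they only become real PNG/JPEG files after a git lfs pull. A small hypothetical helper (not part of the commit) to sanity-check them once the binaries are present:

from pathlib import Path
from PIL import Image

stim_dir = Path("all_stimuli/MST_pairs1")
for path in sorted(stim_dir.iterdir()):
    if path.suffix.lower() in {".png", ".jpg", ".jpeg"}:
        with Image.open(path) as img:
            # report resolution and color mode for each pairmate image
            print(f"{path.name}: {img.size[0]}x{img.size[1]} {img.mode}")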
check_union_mask.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
compare_protocols.ipynb ADDED
@@ -0,0 +1,125 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "a42c36bb-6436-4225-aef0-e0396d3fb323",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "importing modules\n"
14
+ ]
15
+ }
16
+ ],
17
+ "source": [
18
+ "print(\"importing modules\")\n",
19
+ "import os\n",
20
+ "import sys\n",
21
+ "import json\n",
22
+ "import argparse\n",
23
+ "import numpy as np\n",
24
+ "import time\n",
25
+ "import random\n",
26
+ "import string\n",
27
+ "import h5py\n",
28
+ "from tqdm import tqdm\n",
29
+ "import webdataset as wds\n",
30
+ "from PIL import Image\n",
31
+ "import pandas as pd\n",
32
+ "import nibabel as nib\n",
33
+ "import nilearn\n",
34
+ "\n",
35
+ "import matplotlib.pyplot as plt\n",
36
+ "import torch\n",
37
+ "import torch.nn as nn\n",
38
+ "from torchvision import transforms\n",
39
+ "\n",
40
+ "# tf32 data type is faster than standard float32\n",
41
+ "torch.backends.cuda.matmul.allow_tf32 = True\n",
42
+ "\n",
43
+ "import utils\n",
44
+ "from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas\n",
45
+ "\n",
46
+ "# imports utils from mindeye_preproc as \"preproc\"\n",
47
+ "import importlib.util\n",
48
+ "parent_utils_path = \"/home/ri4541/mindeye_preproc/analysis/utils.py\"\n",
49
+ "spec = importlib.util.spec_from_file_location(\"utils\", parent_utils_path)\n",
50
+ "preproc = importlib.util.module_from_spec(spec)\n",
51
+ "parent_dir = os.path.dirname(parent_utils_path)\n",
52
+ "if parent_dir not in sys.path:\n",
53
+ " sys.path.append(parent_dir)\n",
54
+ "spec.loader.exec_module(preproc)\n",
55
+ "\n",
56
+ "if utils.is_interactive():\n",
57
+ " from IPython.display import clear_output # function to clear print outputs in cell\n",
58
+ " %load_ext autoreload \n",
59
+ " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n",
60
+ " %autoreload 2 "
61
+ ]
62
+ },
63
+ {
64
+ "cell_type": "code",
65
+ "execution_count": null,
66
+ "id": "6a697e85-f2fe-473a-9ce0-fb4f2ffc3622",
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "sub = 'sub-001'\n",
71
+ "session = 'ses-05'\n",
72
+ "tasks = ['A', 'B', 'C']\n",
73
+ "split = 'MST' # train/test split should be MST for multi-protocol experiments\n",
74
+ "n_seeds = 10"
75
+ ]
76
+ },
77
+ {
78
+ "cell_type": "code",
79
+ "execution_count": null,
80
+ "id": "f00cab3e-0d92-4205-8273-f8961f08f4fe",
81
+ "metadata": {},
82
+ "outputs": [],
83
+ "source": [
84
+ "import ast # safe evaluation of limited python data types\n",
85
+ "\n",
86
+ "all_evals = []\n",
87
+ "for i in range(10):\n",
88
+ " evals = []\n",
89
+ " model_name=f\"{sub}_{session}_task-{task}_bs24_MST_rishab_{split}split_{i}\"\n",
90
+ " eval_dir = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\"\n",
91
+ " with open(f\"{eval_dir}/final_evals\", 'r') as f:\n",
92
+ " # skip the first two lines - no useful info\n",
93
+ " f.readline(); f.readline()\n",
94
+ " for line in f: # pixcorr, ssim, alexnet2, alexnet5, inception, clip_, effnet, swav, fwd_acc, bwd_acc, mst_score\n",
95
+ " # print(line.strip())\n",
96
+ " evals.append(ast.literal_eval(line.strip())) # strip() gets rid of leading and trailing spaces/newlines; literal_eval() returns the values as the \"intended\" type instead of strings\n",
97
+ " # evals.append(line.strip())\n",
98
+ " all_evals.append(evals)\n",
99
+ "\n",
100
+ "all_evals[0]"
101
+ ]
102
+ }
103
+ ],
104
+ "metadata": {
105
+ "kernelspec": {
106
+ "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]",
107
+ "language": "python",
108
+ "name": "conda_rt_mindeye2"
109
+ },
110
+ "language_info": {
111
+ "codemirror_mode": {
112
+ "name": "ipython",
113
+ "version": 3
114
+ },
115
+ "file_extension": ".py",
116
+ "mimetype": "text/x-python",
117
+ "name": "python",
118
+ "nbconvert_exporter": "python",
119
+ "pygments_lexer": "ipython3",
120
+ "version": "3.11.7"
121
+ }
122
+ },
123
+ "nbformat": 4,
124
+ "nbformat_minor": 5
125
+ }
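
A possible follow-up cell for compare_protocols.ipynb (not in the commit): summarize the per-seed values collected in all_evals by the cell above. It assumes each parsed line is a scalar in the order given by the notebook's comment and sets the non-scalar mst_score entry aside.

import numpy as np

# metric order assumed from the comment in the loading cell above
metric_names = ["pixcorr", "ssim", "alexnet2", "alexnet5", "inception",
                "clip_", "effnet", "swav", "fwd_acc", "bwd_acc"]

# keep the 10 scalar metrics per seed; the trailing mst_score entry may be a list
scalars = np.array([seed_evals[:len(metric_names)] for seed_evals in all_evals], dtype=float)

for name, mean, std in zip(metric_names, scalars.mean(axis=0), scalars.std(axis=0)):
    print(f"{name}: {mean:.4f} +/- {std:.4f}")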
enhanced_recon_inference.ipynb ADDED
@@ -0,0 +1,460 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "6b18f6a3-cc4f-437e-9756-c99fc6a5fad4",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "importing modules\n"
14
+ ]
15
+ },
16
+ {
17
+ "name": "stderr",
18
+ "output_type": "stream",
19
+ "text": [
20
+ "Detected kernel version 4.18.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n"
21
+ ]
22
+ },
23
+ {
24
+ "name": "stdout",
25
+ "output_type": "stream",
26
+ "text": [
27
+ "LOCAL RANK 0\n",
28
+ "device: cuda\n"
29
+ ]
30
+ }
31
+ ],
32
+ "source": [
33
+ "print('importing modules')\n",
34
+ "import os\n",
35
+ "import sys\n",
36
+ "import json\n",
37
+ "import argparse\n",
38
+ "import numpy as np\n",
39
+ "import math\n",
40
+ "from einops import rearrange\n",
41
+ "import time\n",
42
+ "import random\n",
43
+ "import string\n",
44
+ "import h5py\n",
45
+ "from tqdm import tqdm\n",
46
+ "\n",
47
+ "import matplotlib.pyplot as plt\n",
48
+ "import torch\n",
49
+ "import torch.nn as nn\n",
50
+ "from torchvision import transforms\n",
51
+ "from accelerate import Accelerator, DeepSpeedPlugin\n",
52
+ "\n",
53
+ "# SDXL unCLIP requires code from https://github.com/Stability-AI/generative-models/tree/main\n",
54
+ "sys.path.append('generative_models/')\n",
55
+ "import sgm\n",
56
+ "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder, FrozenCLIPEmbedder, FrozenOpenCLIPEmbedder2\n",
57
+ "from generative_models.sgm.models.diffusion import DiffusionEngine\n",
58
+ "from generative_models.sgm.util import append_dims\n",
59
+ "from omegaconf import OmegaConf\n",
60
+ "\n",
61
+ "# tf32 data type is faster than standard float32\n",
62
+ "torch.backends.cuda.matmul.allow_tf32 = True\n",
63
+ "\n",
64
+ "# custom functions #\n",
65
+ "import utils\n",
66
+ "from models import *\n",
67
+ "\n",
68
+ "### Multi-GPU config ###\n",
69
+ "local_rank = os.getenv('RANK')\n",
70
+ "if local_rank is None: \n",
71
+ " local_rank = 0\n",
72
+ "else:\n",
73
+ " local_rank = int(local_rank)\n",
74
+ "print(\"LOCAL RANK \", local_rank) \n",
75
+ "\n",
76
+ "accelerator = Accelerator(split_batches=False, mixed_precision=\"fp16\")\n",
77
+ "device = accelerator.device\n",
78
+ "print(\"device:\",device)"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "execution_count": 6,
84
+ "id": "20cdb696-1d6e-4b73-951b-b0cd1dda219a",
85
+ "metadata": {},
86
+ "outputs": [
87
+ {
88
+ "name": "stdout",
89
+ "output_type": "stream",
90
+ "text": [
91
+ "model_name: sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0\n",
92
+ "--model_name=sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0 --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0/all_recons.pt\n",
93
+ "The autoreload extension is already loaded. To reload it, use:\n",
94
+ " %reload_ext autoreload\n"
95
+ ]
96
+ }
97
+ ],
98
+ "source": [
99
+ "# if running this interactively, can specify jupyter_args here for argparser to use\n",
100
+ "if utils.is_interactive():\n",
101
+ " model_name = f\"sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0\" #\"sub-001_bs24_MST\"\n",
102
+ " print(\"model_name:\", model_name)\n",
103
+ " if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n",
104
+ " all_recons_path = f\"{eval_dir}/all_recons.pt\"\n",
105
+ " elif \"paul\" in model_name:\n",
106
+ " all_recons_path = f\"evals/{model_name}/{model_name}_all_recons.pt\"\n",
107
+ " else:\n",
108
+ " all_recons_path = f\"{eval_dir}/{model_name}_all_recons.pt\" \n",
109
+ "\n",
110
+ " # global_batch_size and batch_size should already be defined in the above cells\n",
111
+ " # other variables can be specified in the following string:\n",
112
+ " jupyter_args = f\"--model_name={model_name} --all_recons_path={all_recons_path}\"\n",
113
+ " print(jupyter_args)\n",
114
+ " jupyter_args = jupyter_args.split()\n",
115
+ " \n",
116
+ " from IPython.display import clear_output # function to clear print outputs in cell\n",
117
+ " %load_ext autoreload \n",
118
+ " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n",
119
+ " %autoreload 2 "
120
+ ]
121
+ },
122
+ {
123
+ "cell_type": "code",
124
+ "execution_count": 8,
125
+ "id": "4b31d7c0-f5bd-4a19-a8be-7a3a165d79b6",
126
+ "metadata": {},
127
+ "outputs": [
128
+ {
129
+ "name": "stderr",
130
+ "output_type": "stream",
131
+ "text": [
132
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
133
+ " warnings.warn(\n"
134
+ ]
135
+ },
136
+ {
137
+ "name": "stdout",
138
+ "output_type": "stream",
139
+ "text": [
140
+ "sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0\n",
141
+ "torch.Size([300, 3, 768, 768]) torch.Size([300, 256, 1664])\n"
142
+ ]
143
+ }
144
+ ],
145
+ "source": [
146
+ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n",
147
+ "parser.add_argument(\n",
148
+ " \"--model_name\", type=str, default=\"testing\",\n",
149
+ " help=\"will load ckpt for model found in ../train_logs/model_name\",\n",
150
+ ")\n",
151
+ "parser.add_argument(\n",
152
+ " \"--all_recons_path\", type=str,\n",
153
+ " help=\"Path to where all_recons.pt is stored\",\n",
154
+ ")\n",
155
+ "parser.add_argument(\n",
156
+ " \"--seed\",type=int,default=42,\n",
157
+ ")\n",
158
+ "if utils.is_interactive():\n",
159
+ " args = parser.parse_args(jupyter_args)\n",
160
+ "else:\n",
161
+ " args = parser.parse_args()\n",
162
+ "\n",
163
+ "# create global variables without the args prefix\n",
164
+ "for attribute_name in vars(args).keys():\n",
165
+ " globals()[attribute_name] = getattr(args, attribute_name)\n",
166
+ " \n",
167
+ "# seed all random functions\n",
168
+ "utils.seed_everything(seed)\n",
169
+ "\n",
170
+ "# make output directory\n",
171
+ "eval_dir = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\"\n",
172
+ "if not exists(f\"eval_dir\"):\n",
173
+ " os.mkdir(\"eval_dir\")\n",
174
+ "\n",
175
+ "# all_images = torch.load(f\"{eval_dir}/all_images.pt\")\n",
176
+ "# all_recons = torch.load(f\"{eval_dir}/all_recons.pt\")\n",
177
+ "# all_clipvoxels = torch.load(f\"{eval_dir}/all_clipvoxels.pt\")\n",
178
+ "# all_predcaptions = torch.load(f\"{eval_dir}/all_predcaptions.pt\")\n",
179
+ "if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n",
180
+ " all_images = torch.load(f\"{eval_dir}/all_images.pt\")\n",
181
+ " all_clipvoxels = torch.load(f\"{eval_dir}/all_clipvoxels.pt\")\n",
182
+ " all_predcaptions = torch.load(f\"{eval_dir}/all_predcaptions.pt\")\n",
183
+ " all_unrefinedrecons = torch.load(f\"{eval_dir}/all_recons.pt\")\n",
184
+ "elif \"ses-01\" in model_name and \"paul\" in model_name:\n",
185
+ " all_images = torch.load(f\"evals/{model_name}/{model_name}_all_images.pt\")\n",
186
+ " all_clipvoxels = torch.load(f\"evals/{model_name}/{model_name}_all_clipvoxels.pt\")\n",
187
+ " all_predcaptions = torch.load(f\"evals/{model_name}/{model_name}_all_predcaptions.pt\")\n",
188
+ " all_unrefinedrecons = torch.load(f\"evals/{model_name}/{model_name}_all_recons.pt\")\n",
189
+ "else:\n",
190
+ " all_images = torch.load(f\"{eval_dir}/{model_name}_all_images.pt\") \n",
191
+ " all_clipvoxels = torch.load(f\"{eval_dir}/{model_name}_all_clipvoxels.pt\") \n",
192
+ " all_predcaptions = torch.load(f\"{eval_dir}/{model_name}_all_predcaptions.pt\") \n",
193
+ " all_unrefinedrecons = torch.load(f\"{eval_dir}/{model_name}_all_recons.pt\") \n",
194
+ "\n",
195
+ "all_recons = torch.load(all_recons_path)\n",
196
+ "all_recons = transforms.Resize((768,768))(all_recons).float()\n",
197
+ "\n",
198
+ "print(model_name)\n",
199
+ "print(all_recons.shape, all_clipvoxels.shape)"
200
+ ]
201
+ },
202
+ {
203
+ "cell_type": "code",
204
+ "execution_count": 5,
205
+ "id": "24bdd667-0862-4561-b432-9fa7543df863",
206
+ "metadata": {
207
+ "tags": []
208
+ },
209
+ "outputs": [
210
+ {
211
+ "name": "stderr",
212
+ "output_type": "stream",
213
+ "text": [
214
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n",
215
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n",
216
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n",
217
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n",
218
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n",
219
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n",
220
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n",
221
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n",
222
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n",
223
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n",
224
+ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n"
225
+ ]
226
+ },
227
+ {
228
+ "name": "stdout",
229
+ "output_type": "stream",
230
+ "text": [
231
+ "Initialized embedder #0: FrozenCLIPEmbedder with 123060480 params. Trainable: False\n",
232
+ "Initialized embedder #1: FrozenOpenCLIPEmbedder2 with 694659841 params. Trainable: False\n",
233
+ "Initialized embedder #2: ConcatTimestepEmbedderND with 0 params. Trainable: False\n",
234
+ "Initialized embedder #3: ConcatTimestepEmbedderND with 0 params. Trainable: False\n",
235
+ "Initialized embedder #4: ConcatTimestepEmbedderND with 0 params. Trainable: False\n",
236
+ "Restored from /scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/zavychromaxl_v30.safetensors with 1 missing and 1 unexpected keys\n",
237
+ "Missing Keys: ['denoiser.sigmas']\n",
238
+ "Unexpected Keys: ['conditioner.embedders.0.transformer.text_model.embeddings.position_ids']\n",
239
+ "crossattn torch.Size([1, 77, 2048])\n",
240
+ "vector_suffix torch.Size([1, 1536])\n",
241
+ "---\n",
242
+ "crossattn_uc torch.Size([1, 77, 2048])\n",
243
+ "vector_uc torch.Size([1, 2816])\n"
244
+ ]
245
+ }
246
+ ],
247
+ "source": [
248
+ "config = OmegaConf.load(\"generative_models/configs/unclip6.yaml\")\n",
249
+ "config = OmegaConf.to_container(config, resolve=True)\n",
250
+ "unclip_params = config[\"model\"][\"params\"]\n",
251
+ "sampler_config = unclip_params[\"sampler_config\"]\n",
252
+ "sampler_config['params']['num_steps'] = 38\n",
253
+ "config = OmegaConf.load(\"generative_models/configs/inference/sd_xl_base.yaml\")\n",
254
+ "config = OmegaConf.to_container(config, resolve=True)\n",
255
+ "refiner_params = config[\"model\"][\"params\"]\n",
256
+ "\n",
257
+ "network_config = refiner_params[\"network_config\"]\n",
258
+ "denoiser_config = refiner_params[\"denoiser_config\"]\n",
259
+ "first_stage_config = refiner_params[\"first_stage_config\"]\n",
260
+ "conditioner_config = refiner_params[\"conditioner_config\"]\n",
261
+ "scale_factor = refiner_params[\"scale_factor\"]\n",
262
+ "disable_first_stage_autocast = refiner_params[\"disable_first_stage_autocast\"]\n",
263
+ "\n",
264
+ "# base_ckpt_path = '/weka/robin/projects/stable-research/checkpoints/sd_xl_base_1.0.safetensors'\n",
265
+ "# base_ckpt_path = '/weka/proj-fmri/paulscotti/stable-research/zavychromaxl_v30.safetensors'\n",
266
+ "\n",
267
+ "# if running on Della compute node, won't be able to find openai/clip-vit-large-patch14 (or any other internet-accessed file like from huggingface) so always download \"locally\" (onto Della)\n",
268
+ "base_ckpt_path = '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/zavychromaxl_v30.safetensors'\n",
269
+ "base_engine = DiffusionEngine(network_config=network_config,\n",
270
+ " denoiser_config=denoiser_config,\n",
271
+ " first_stage_config=first_stage_config,\n",
272
+ " conditioner_config=conditioner_config,\n",
273
+ " sampler_config=sampler_config, # using the one defined by the unclip\n",
274
+ " scale_factor=scale_factor,\n",
275
+ " disable_first_stage_autocast=disable_first_stage_autocast,\n",
276
+ " ckpt_path=base_ckpt_path)\n",
277
+ "base_engine.eval().requires_grad_(False)\n",
278
+ "base_engine.to(device)\n",
279
+ "\n",
280
+ "base_text_embedder1 = FrozenCLIPEmbedder(\n",
281
+ " layer=conditioner_config['params']['emb_models'][0]['params']['layer'],\n",
282
+ " layer_idx=conditioner_config['params']['emb_models'][0]['params']['layer_idx'],\n",
283
+ ")\n",
284
+ "base_text_embedder1.to(device)\n",
285
+ "\n",
286
+ "base_text_embedder2 = FrozenOpenCLIPEmbedder2(\n",
287
+ " arch=conditioner_config['params']['emb_models'][1]['params']['arch'],\n",
288
+ " version=conditioner_config['params']['emb_models'][1]['params']['version'],\n",
289
+ " freeze=conditioner_config['params']['emb_models'][1]['params']['freeze'],\n",
290
+ " layer=conditioner_config['params']['emb_models'][1]['params']['layer'],\n",
291
+ " always_return_pooled=conditioner_config['params']['emb_models'][1]['params']['always_return_pooled'],\n",
292
+ " legacy=conditioner_config['params']['emb_models'][1]['params']['legacy'],\n",
293
+ ")\n",
294
+ "base_text_embedder2.to(device)\n",
295
+ "\n",
296
+ "batch={\"txt\": \"\",\n",
297
+ " \"original_size_as_tuple\": torch.ones(1, 2).to(device) * 768,\n",
298
+ " \"crop_coords_top_left\": torch.zeros(1, 2).to(device),\n",
299
+ " \"target_size_as_tuple\": torch.ones(1, 2).to(device) * 1024}\n",
300
+ "out = base_engine.conditioner(batch)\n",
301
+ "crossattn = out[\"crossattn\"].to(device)\n",
302
+ "vector_suffix = out[\"vector\"][:,-1536:].to(device)\n",
303
+ "print(\"crossattn\", crossattn.shape)\n",
304
+ "print(\"vector_suffix\", vector_suffix.shape)\n",
305
+ "print(\"---\")\n",
306
+ "\n",
307
+ "batch_uc={\"txt\": \"painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime\",\n",
308
+ " \"original_size_as_tuple\": torch.ones(1, 2).to(device) * 768,\n",
309
+ " \"crop_coords_top_left\": torch.zeros(1, 2).to(device),\n",
310
+ " \"target_size_as_tuple\": torch.ones(1, 2).to(device) * 1024}\n",
311
+ "out = base_engine.conditioner(batch_uc)\n",
312
+ "crossattn_uc = out[\"crossattn\"].to(device)\n",
313
+ "vector_uc = out[\"vector\"].to(device)\n",
314
+ "print(\"crossattn_uc\", crossattn_uc.shape)\n",
315
+ "print(\"vector_uc\", vector_uc.shape)"
316
+ ]
317
+ },
318
+ {
319
+ "cell_type": "code",
320
+ "execution_count": 6,
321
+ "id": "07f437d1-9b8e-4b13-85ad-d45062a5ce09",
322
+ "metadata": {
323
+ "tags": []
324
+ },
325
+ "outputs": [],
326
+ "source": [
327
+ "num_samples = 1 # PS: I tried increasing this to 16 and picking highest cosine similarity, it didnt seem to increase eval performance!\n",
328
+ "img2img_timepoint = 15 # 13 # higher number means more reliance on prompt, less reliance on matching the conditioning image\n",
329
+ "base_engine.sampler.guider.scale = 5 # cfg\n",
330
+ "def denoiser(x, sigma, c): return base_engine.denoiser(base_engine.model, x, sigma, c)"
331
+ ]
332
+ },
333
+ {
334
+ "cell_type": "code",
335
+ "execution_count": 7,
336
+ "id": "939e1cbb-5836-48c2-87d8-3e493e950011",
337
+ "metadata": {
338
+ "tags": []
339
+ },
340
+ "outputs": [
341
+ {
342
+ "name": "stderr",
343
+ "output_type": "stream",
344
+ "text": [
345
+ " 0%| | 0/300 [00:00<?, ?it/s]/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.\n",
346
+ " warnings.warn(\n",
347
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/utils/checkpoint.py:61: UserWarning: None of the inputs have requires_grad=True. Gradients will be None\n",
348
+ " warnings.warn(\n",
349
+ "100%|██████████| 300/300 [09:15<00:00, 1.85s/it]\n",
350
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
351
+ " warnings.warn(\n"
352
+ ]
353
+ },
354
+ {
355
+ "name": "stdout",
356
+ "output_type": "stream",
357
+ "text": [
358
+ "all_enhancedrecons torch.Size([300, 3, 512, 512])\n",
359
+ "saved /scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0/sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0_all_enhancedrecons.pt\n"
360
+ ]
361
+ }
362
+ ],
363
+ "source": [
364
+ "all_enhancedrecons = None\n",
365
+ "for img_idx in tqdm(range(len(all_recons))):\n",
366
+ " with torch.no_grad(), torch.cuda.amp.autocast(dtype=torch.float16), base_engine.ema_scope():\n",
367
+ " base_engine.sampler.num_steps = 25\n",
368
+ " \n",
369
+ " image = all_recons[[img_idx]]\n",
370
+ " image = image.to(device)\n",
371
+ " prompt = all_predcaptions[[img_idx]][0]\n",
372
+ " # prompt = \"\"\n",
373
+ "\n",
374
+ " # z = torch.randn(num_samples,4,96,96).to(device)\n",
375
+ " assert image.shape[-1]==768\n",
376
+ " z = base_engine.encode_first_stage(image*2-1).repeat(num_samples,1,1,1)\n",
377
+ "\n",
378
+ " openai_clip_text = base_text_embedder1(prompt)\n",
379
+ " clip_text_tokenized, clip_text_emb = base_text_embedder2(prompt)\n",
380
+ " clip_text_emb = torch.hstack((clip_text_emb, vector_suffix))\n",
381
+ " clip_text_tokenized = torch.cat((openai_clip_text, clip_text_tokenized),dim=-1)\n",
382
+ " c = {\"crossattn\": clip_text_tokenized.repeat(num_samples,1,1), \"vector\": clip_text_emb.repeat(num_samples,1)}\n",
383
+ " uc = {\"crossattn\": crossattn_uc.repeat(num_samples,1,1), \"vector\": vector_uc.repeat(num_samples,1)}\n",
384
+ "\n",
385
+ " noise = torch.randn_like(z)\n",
386
+ " sigmas = base_engine.sampler.discretization(base_engine.sampler.num_steps).to(device)\n",
387
+ " init_z = (z + noise * append_dims(sigmas[-img2img_timepoint], z.ndim)) / torch.sqrt(1.0 + sigmas[0] ** 2.0)\n",
388
+ " sigmas = sigmas[-img2img_timepoint:].repeat(num_samples,1)\n",
389
+ "\n",
390
+ " base_engine.sampler.num_steps = sigmas.shape[-1] - 1\n",
391
+ " noised_z, _, _, _, c, uc = base_engine.sampler.prepare_sampling_loop(init_z, cond=c, uc=uc, \n",
392
+ " num_steps=base_engine.sampler.num_steps)\n",
393
+ " for timestep in range(base_engine.sampler.num_steps):\n",
394
+ " noised_z = base_engine.sampler.sampler_step(sigmas[:,timestep],\n",
395
+ " sigmas[:,timestep+1],\n",
396
+ " denoiser, noised_z, cond=c, uc=uc, gamma=0)\n",
397
+ " samples_z_base = noised_z\n",
398
+ " samples_x = base_engine.decode_first_stage(samples_z_base)\n",
399
+ " samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)[0].cpu()[None]\n",
400
+ "\n",
401
+ " if all_enhancedrecons is None:\n",
402
+ " all_enhancedrecons = samples\n",
403
+ " else:\n",
404
+ " all_enhancedrecons = torch.vstack((all_enhancedrecons, samples))\n",
405
+ "\n",
406
+ "all_enhancedrecons = transforms.Resize((512,512))(all_enhancedrecons).float()\n",
407
+ "print(\"all_enhancedrecons\", all_enhancedrecons.shape)\n",
408
+ "\n",
409
+ "torch.save(all_enhancedrecons,f\"{eval_dir}/all_enhancedrecons.pt\")\n",
410
+ "print(f\"saved {eval_dir}/all_enhancedrecons.pt\")"
411
+ ]
412
+ },
413
+ {
414
+ "cell_type": "code",
415
+ "execution_count": 11,
416
+ "id": "1da11b84-9bf7-4057-872e-20cba36f6921",
417
+ "metadata": {},
418
+ "outputs": [],
419
+ "source": [
420
+ "# x = torch.permute(all_images, (0,2,3,1))\n",
421
+ "# y = torch.permute(all_enhancedrecons, (0,2,3,1))\n",
422
+ "# fig, ax = plt.subplots(102, 2, figsize=(15, 500))\n",
423
+ "# for row, _ in enumerate(ax):\n",
424
+ "# ax[row][0].imshow(x.cpu()[row])\n",
425
+ "# ax[row][1].imshow(y.cpu()[row])\n",
426
+ "# plt.tight_layout()\n",
427
+ "# plt.show()"
428
+ ]
429
+ },
430
+ {
431
+ "cell_type": "code",
432
+ "execution_count": null,
433
+ "id": "cc52f485-1e51-44d8-ad69-04388fc4e8c7",
434
+ "metadata": {},
435
+ "outputs": [],
436
+ "source": []
437
+ }
438
+ ],
439
+ "metadata": {
440
+ "kernelspec": {
441
+ "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]",
442
+ "language": "python",
443
+ "name": "conda_rt_mindeye2"
444
+ },
445
+ "language_info": {
446
+ "codemirror_mode": {
447
+ "name": "ipython",
448
+ "version": 3
449
+ },
450
+ "file_extension": ".py",
451
+ "mimetype": "text/x-python",
452
+ "name": "python",
453
+ "nbconvert_exporter": "python",
454
+ "pygments_lexer": "ipython3",
455
+ "version": "3.11.7"
456
+ }
457
+ },
458
+ "nbformat": 4,
459
+ "nbformat_minor": 5
460
+ }
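
A hypothetical follow-up to the notebook above, essentially the commented-out plotting cell made concrete: load the saved tensors and compare ground-truth stimuli against the enhanced reconstructions. The eval directory is a placeholder, and the tensors are assumed to be (N, 3, H, W) floats in [0, 1].

import torch
import matplotlib.pyplot as plt

eval_dir = "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/<model_name>"  # placeholder
all_images = torch.load(f"{eval_dir}/all_images.pt")            # ground-truth stimuli
all_enhanced = torch.load(f"{eval_dir}/all_enhancedrecons.pt")  # refined reconstructions

n = 4  # preview a handful of trials
fig, ax = plt.subplots(n, 2, figsize=(6, 3 * n))
for row in range(n):
    ax[row][0].imshow(all_images[row].permute(1, 2, 0).cpu())
    ax[row][0].set_title("stimulus")
    ax[row][1].imshow(all_enhanced[row].permute(1, 2, 0).cpu())
    ax[row][1].set_title("enhanced recon")
    for a in ax[row]:
        a.axis("off")
plt.tight_layout()
plt.show()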
environment.yml ADDED
@@ -0,0 +1,322 @@
1
+ name: rt_mindEye2
2
+ channels:
3
+ - conda-forge
4
+ - defaults
5
+ dependencies:
6
+ - _libgcc_mutex=0.1=main
7
+ - _openmp_mutex=5.1=1_gnu
8
+ - asttokens=2.4.1=pyhd8ed1ab_0
9
+ - bcrypt=3.2.0=py311h5eee18b_1
10
+ - blas=1.0=mkl
11
+ - boto3=1.34.117=pyhd8ed1ab_0
12
+ - botocore=1.34.117=pyge310_1234567_0
13
+ - brotli=1.0.9=h9c3ff4c_4
14
+ - brotli-python=1.0.9=py311h6a678d5_8
15
+ - bzip2=1.0.8=h5eee18b_5
16
+ - ca-certificates=2024.8.30=hbcca054_0
17
+ - cffi=1.16.0=py311h5eee18b_1
18
+ - cryptography=42.0.5=py311hdda0065_1
19
+ - cycler=0.12.1=pyhd8ed1ab_0
20
+ - decorator=5.1.1=pyhd8ed1ab_0
21
+ - exceptiongroup=1.2.2=pyhd8ed1ab_0
22
+ - freetype=2.10.4=h0708190_1
23
+ - icu=73.1=h6a678d5_0
24
+ - inotify-tools=3.20.2=h516909a_0
25
+ - inotify_simple=1.3.5=pyha770c72_3
26
+ - intel-openmp=2021.4.0=h06a4308_3561
27
+ - ipympl=0.9.4=pyhd8ed1ab_0
28
+ - ipython_genutils=0.2.0=pyhd8ed1ab_1
29
+ - jedi=0.19.1=pyhd8ed1ab_0
30
+ - jmespath=1.0.1=py311h06a4308_0
31
+ - jpeg=9e=h166bdaf_1
32
+ - jupyterlab_pygments=0.3.0=pyhd8ed1ab_1
33
+ - jupyterlab_widgets=3.0.13=pyhd8ed1ab_0
34
+ - lcms2=2.12=h3be6417_0
35
+ - ld_impl_linux-64=2.38=h1181459_1
36
+ - lerc=3.0=h295c915_0
37
+ - libdeflate=1.17=h5eee18b_1
38
+ - libffi=3.4.4=h6a678d5_0
39
+ - libgcc=7.2.0=h69d50b8_2
40
+ - libgcc-ng=11.2.0=h1234567_1
41
+ - libgomp=11.2.0=h1234567_1
42
+ - libpng=1.6.39=h5eee18b_0
43
+ - libsodium=1.0.18=h7b6447c_0
44
+ - libstdcxx-ng=11.2.0=h1234567_1
45
+ - libtiff=4.5.1=h6a678d5_0
46
+ - libuuid=1.41.5=h5eee18b_0
47
+ - libuv=1.48.0=h5eee18b_0
48
+ - libwebp-base=1.3.2=h5eee18b_1
49
+ - lz4-c=1.9.4=h6a678d5_1
50
+ - matplotlib-base=3.9.2=py311h6fb44e5_0
51
+ - mkl=2021.4.0=h06a4308_640
52
+ - mkl-service=2.4.0=py311h5eee18b_0
53
+ - mkl_fft=1.3.1=py311h30b3d60_0
54
+ - mkl_random=1.2.2=py311hba01205_0
55
+ - munkres=1.1.4=pyh9f0ad1d_0
56
+ - ncurses=6.4=h6a678d5_0
57
+ - nodejs=20.17.0=hb8e3597_0
58
+ - openjpeg=2.5.2=he7f1fd0_0
59
+ - openssl=3.0.15=h5eee18b_0
60
+ - paramiko=2.8.1=pyhd3eb1b0_0
61
+ - pexpect=4.9.0=pyhd8ed1ab_0
62
+ - pickleshare=0.7.5=py_1003
63
+ - plumbum=1.8.3=pyhd8ed1ab_0
64
+ - ptyprocess=0.7.0=pyhd3deb0d_0
65
+ - pure_eval=0.2.3=pyhd8ed1ab_0
66
+ - pycparser=2.21=pyhd3eb1b0_0
67
+ - pynacl=1.5.0=py311h5eee18b_0
68
+ - pyparsing=3.2.0=pyhd8ed1ab_1
69
+ - pysocks=1.7.1=py311h06a4308_0
70
+ - python=3.11.7=h955ad1f_0
71
+ - python-dateutil=2.9.0post0=py311h06a4308_2
72
+ - python_abi=3.11=2_cp311
73
+ - pywin32-on-windows=0.1.0=pyh1179c8e_3
74
+ - readline=8.2=h5eee18b_0
75
+ - rpyc=5.3.0=pyhd8ed1ab_0
76
+ - s3transfer=0.10.1=py311h06a4308_0
77
+ - six=1.16.0=pyhd3eb1b0_1
78
+ - sqlite=3.41.2=h5eee18b_0
79
+ - stack_data=0.6.2=pyhd8ed1ab_0
80
+ - tk=8.6.12=h1ccaba5_0
81
+ - toml=0.10.2=pyhd3eb1b0_0
82
+ - typing_extensions=4.12.2=pyha770c72_0
83
+ - watchdog=4.0.1=py311h38be061_0
84
+ - wcwidth=0.2.13=pyhd8ed1ab_0
85
+ - wheel=0.41.2=py311h06a4308_0
86
+ - xz=5.4.6=h5eee18b_0
87
+ - yaml=0.2.5=h7b6447c_0
88
+ - zlib=1.2.13=h5eee18b_0
89
+ - zstd=1.5.6=hc292b87_0
90
+ - pip:
91
+ - accelerate==0.24.1
92
+ - aiofiles==22.1.0
93
+ - aiohttp==3.9.3
94
+ - aiosignal==1.3.1
95
+ - aiosqlite==0.20.0
96
+ - annotated-types==0.6.0
97
+ - antlr4-python3-runtime==4.9.3
98
+ - anyio==4.3.0
99
+ - appdirs==1.4.4
100
+ - argon2-cffi==23.1.0
101
+ - argon2-cffi-bindings==21.2.0
102
+ - arrow==1.3.0
103
+ - astor==0.8.1
104
+ - astroid==3.3.5
105
+ - async-lru==2.0.4
106
+ - attrs==23.2.0
107
+ - babel==2.14.0
108
+ - beartype==0.17.2
109
+ - beautifulsoup4==4.12.3
110
+ - bids-validator==1.14.6
111
+ - bleach==6.1.0
112
+ - bokeh==3.3.4
113
+ - braceexpand==0.1.7
114
+ - certifi==2024.8.30
115
+ - charset-normalizer==3.4.0
116
+ - click==8.1.7
117
+ - clip==1.0
118
+ - clip-anytorch==2.6.0
119
+ - coca-pytorch==0.1.0
120
+ - comm==0.2.1
121
+ - contourpy==1.3.1
122
+ - dalle2-pytorch==1.15.6
123
+ - datasets==2.18.0
124
+ - debugpy==1.8.1
125
+ - deepspeed==0.13.1
126
+ - defusedxml==0.7.1
127
+ - diffusers==0.23.0
128
+ - dill==0.3.8
129
+ - docker-pycreds==0.4.0
130
+ - docopt==0.6.2
131
+ - einops==0.7.0
132
+ - einx==0.1.3
133
+ - ema-pytorch==0.4.2
134
+ - embedding-reader==1.7.0
135
+ - entrypoints==0.4
136
+ - evaluate==0.4.1
137
+ - executing==2.0.1
138
+ - fastjsonschema==2.19.1
139
+ - filelock==3.13.1
140
+ - fonttools==4.55.1
141
+ - formulaic==0.3.4
142
+ - fqdn==1.5.1
143
+ - frozendict==2.4.0
144
+ - frozenlist==1.4.1
145
+ - fsspec==2024.2.0
146
+ - ftfy==6.1.3
147
+ - gitdb==4.0.11
148
+ - gitpython==3.1.43
149
+ - greenlet==3.0.3
150
+ - h11==0.14.0
151
+ - h5py==3.10.0
152
+ - hf-transfer==0.1.8
153
+ - hjson==3.1.0
154
+ - httpcore==1.0.6
155
+ - httpx==0.27.2
156
+ - huggingface-hub==0.20.3
157
+ - idna==3.10
158
+ - imageio==2.34.0
159
+ - importlib-metadata==7.0.1
160
+ - inotify==0.2.10
161
+ - interface-meta==1.3.0
162
+ - ipykernel==6.29.2
163
+ - ipython==8.22.1
164
+ - ipywidgets==8.1.2
165
+ - isoduration==20.11.0
166
+ - isort==5.13.2
167
+ - jinja2==3.1.3
168
+ - joblib==1.3.2
169
+ - json5==0.9.17
170
+ - jsonpointer==2.4
171
+ - jsonschema==4.21.1
172
+ - jsonschema-specifications==2023.12.1
173
+ - jupyter==1.1.1
174
+ - jupyter-client==7.4.9
175
+ - jupyter-console==6.6.3
176
+ - jupyter-core==5.7.1
177
+ - jupyter-events==0.9.0
178
+ - jupyter-lsp==2.2.5
179
+ - jupyter-server==2.12.5
180
+ - jupyter-server-fileid==0.9.1
181
+ - jupyter-server-proxy==4.1.0
182
+ - jupyter-server-terminals==0.5.2
183
+ - jupyter-server-ydoc==0.8.0
184
+ - jupyter-ydoc==0.2.5
185
+ - jupyterlab==3.6.7
186
+ - jupyterlab-nvdashboard==0.9.0
187
+ - jupyterlab-server==2.27.3
188
+ - kiwisolver==1.4.7
189
+ - kornia==0.7.1
190
+ - lazy-loader==0.3
+ - lightning-utilities==0.10.1
+ - lxml==5.1.0
+ - markupsafe==2.1.5
+ - matplotlib==3.8.2
+ - matplotlib-inline==0.1.6
+ - mccabe==0.7.0
+ - mistune==3.0.2
+ - mpmath==1.3.0
+ - multidict==6.0.5
+ - multiprocess==0.70.16
+ - nbclassic==1.0.0
+ - nbclient==0.9.0
+ - nbconvert==7.16.1
+ - nbformat==5.9.2
+ - nest-asyncio==1.6.0
+ - networkx==3.2.1
+ - nibabel==5.2.1
+ - nilearn==0.10.3
+ - ninja==1.11.1.1
+ - nose==1.3.7
+ - notebook==6.5.6
+ - notebook-shim==0.2.4
+ - num2words==0.5.13
+ - numpy==1.26.4
+ - nvidia-cublas-cu12==12.1.3.1
+ - nvidia-cuda-cupti-cu12==12.1.105
+ - nvidia-cuda-nvrtc-cu12==12.1.105
+ - nvidia-cuda-runtime-cu12==12.1.105
+ - nvidia-cudnn-cu12==8.9.2.26
+ - nvidia-cufft-cu12==11.0.2.54
+ - nvidia-curand-cu12==10.3.2.106
+ - nvidia-cusolver-cu12==11.4.5.107
+ - nvidia-cusparse-cu12==12.1.0.106
+ - nvidia-nccl-cu12==2.18.1
+ - nvidia-nvjitlink-cu12==12.3.101
+ - nvidia-nvtx-cu12==12.1.105
+ - omegaconf==2.3.0
+ - open-clip-torch==2.24.0
+ - overrides==7.7.0
+ - packaging==24.2
+ - pandas==2.2.0
+ - pandocfilters==1.5.1
+ - parso==0.8.3
+ - pillow==10.2.0
+ - pip==24.3.1
+ - platformdirs==4.3.6
+ - prometheus-client==0.20.0
+ - prompt-toolkit==3.0.43
+ - protobuf==5.28.3
+ - psutil==6.1.0
+ - pure-eval==0.2.2
+ - py-cpuinfo==9.0.0
+ - pyarrow==15.0.0
+ - pyarrow-hotfix==0.6
+ - pybids==0.15.3
+ - pydantic==2.6.2
+ - pydantic-core==2.16.3
+ - pydicom==2.4.4
+ - pygments==2.17.2
+ - pylint==3.3.1
+ - pynvml==11.5.0
+ - python-json-logger==2.0.7
+ - pytorch-lightning==2.0.1
+ - pytorch-warmup==0.1.1
+ - pytz==2024.1
+ - pyyaml==6.0.2
+ - pyzmq==24.0.1
+ - qtconsole==5.5.1
+ - qtpy==2.4.1
+ - referencing==0.33.0
+ - regex==2023.12.25
+ - requests==2.32.3
+ - resize-right==0.0.2
+ - responses==0.18.0
+ - rfc3339-validator==0.1.4
+ - rfc3986-validator==0.1.1
+ - rotary-embedding-torch==0.5.3
+ - rpds-py==0.18.0
+ - safetensors==0.4.2
+ - scikit-image==0.22.0
+ - scikit-learn==1.4.1.post1
+ - scipy==1.12.0
+ - send2trash==1.8.2
+ - sentence-transformers==2.5.1
+ - sentencepiece==0.2.0
+ - sentry-sdk==2.18.0
+ - setproctitle==1.3.4
+ - setuptools==75.5.0
+ - simpervisor==1.0.0
+ - smmap==5.0.1
+ - sniffio==1.3.0
+ - soupsieve==2.5
+ - sqlalchemy==1.3.24
+ - stack-data==0.6.3
+ - sympy==1.12
+ - terminado==0.18.0
+ - threadpoolctl==3.3.0
+ - tifffile==2024.2.12
+ - timm==0.9.16
+ - tinycss2==1.2.1
+ - tokenizers==0.15.2
+ - tomlkit==0.13.2
+ - torch==2.1.0
+ - torch-fidelity==0.3.0
+ - torchmetrics==1.3.0.post0
+ - torchvision==0.16.0
+ - tornado==6.4
+ - tqdm==4.66.2
+ - traitlets==5.14.1
+ - transformers==4.37.2
+ - triton==2.1.0
+ - types-python-dateutil==2.8.19.20240106
+ - typing-extensions==4.9.0
+ - tzdata==2024.1
+ - uri-template==1.3.0
+ - urllib3==2.2.3
+ - vector-quantize-pytorch==1.14.1
+ - wandb==0.17.2
+ - webcolors==1.13
+ - webdataset==0.2.73
+ - webencodings==0.5.1
+ - websocket-client==1.7.0
+ - widgetsnbextension==4.0.10
+ - wrapt==1.16.0
+ - x-clip==0.14.4
+ - xformers==0.0.22.post7
+ - xxhash==3.4.1
+ - xyzservices==2023.10.1
+ - y-py==0.6.2
+ - yarl==1.9.4
+ - ypy-websocket==0.8.4
+ - zipp==3.17.0
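Note: the pinned stack above (torch 2.1.0 with CUDA 12.1 wheels, xformers 0.0.22.post7, transformers 4.37.2, etc.) is what the code added in this commit assumes. A minimal sketch, not part of the upload, for spot-checking that a freshly built environment resolved a few of these pins; which packages to check is my own choice, not something the commit specifies:

import sys
from importlib.metadata import version, PackageNotFoundError

# Hypothetical sanity check against a handful of the pins listed in environment.yml above.
expected = {
    "torch": "2.1.0",
    "torchvision": "0.16.0",
    "numpy": "1.26.4",
    "transformers": "4.37.2",
    "xformers": "0.0.22.post7",
    "wandb": "0.17.2",
}
print("python", sys.version.split()[0])
for pkg, want in expected.items():
    try:
        got = version(pkg)
    except PackageNotFoundError:
        got = "not installed"
    flag = "OK" if got == want else "MISMATCH"
    print(f"{pkg}: expected {want}, found {got} [{flag}]")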
evals/testing_MST_ViT-H_1024/final_evals ADDED
@@ -0,0 +1,12 @@
+ Metric,Value
+ alexnet2,0.5318009664359409
+ alexnet5,0.5370902442209743
+ inception,0.5256954420791432
+ clip_,0.5321927647903878
+ effnet,0.9561490287097593
+ swav,0.6627069928859551
+ pixcorr,0.06609624355941902
+ ssim,0.4277698868332266
+ percent_correct_fwd,0.016129031777381897
+ percent_correct_bwd,0.016129031777381897
+ mst_score,[]
evals/testing_MST_ViT-H_1024/testing_MST_ViT-H_1024_MST_ID.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bb68a3eb40936eedda0b612eac941b1c6666f0286d6637a7dbc29ec4a61e1c8
+ size 11216
evals/testing_MST_ViT-H_1024/testing_MST_ViT-H_1024_MST_pairmate_indices.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28370fbe1a5e02d265e8e38f19bcd7d1ec6c69da6417b9d462c46ced1a9487d9
+ size 624
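The .npy files in these eval folders are stored as Git LFS pointers, so the arrays themselves are only available after `git lfs pull`. A minimal, hypothetical loading sketch that mirrors how the evaluation notebook added later in this commit reads them (file names taken from the diff; no shapes are asserted here):

import numpy as np

eval_dir = "evals/testing_MST_ViT-H_1024"
# MST image identifiers and the index pairs of MST pairmate (lure) images used for the pairwise test
MST_ID = np.load(f"{eval_dir}/testing_MST_ViT-H_1024_MST_ID.npy")
MST_pairmate_indices = np.load(f"{eval_dir}/testing_MST_ViT-H_1024_MST_pairmate_indices.npy")
print(MST_ID.shape, MST_pairmate_indices.shape)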
evals/testing_MST_ViT-H_1024_MST_ID.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bb68a3eb40936eedda0b612eac941b1c6666f0286d6637a7dbc29ec4a61e1c8
+ size 11216
evals/testing_MST_ViT-H_1024_scalf/final_evals ADDED
@@ -0,0 +1,12 @@
+ Metric,Value
+ alexnet2,0.5106438552958077
+ alexnet5,0.49751861042183626
+ inception,0.5076890427060207
+ clip_,0.519328718819381
+ effnet,0.9543885725097773
+ swav,0.6520650061534471
+ pixcorr,0.05185274977219404
+ ssim,0.4141594942526016
+ percent_correct_fwd,0.04838709533214569
+ percent_correct_bwd,0.016129031777381897
+ mst_score,[]
evals/testing_MST_ViT-H_1024_scalf/testing_MST_ViT-H_1024_scalf_MST_pairmate_indices.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28370fbe1a5e02d265e8e38f19bcd7d1ec6c69da6417b9d462c46ced1a9487d9
+ size 624
evals/testing_MST_ViT-H_1024_scalf_best/final_evals ADDED
@@ -0,0 +1,12 @@
+ Metric,Value
+ alexnet2,0.5031343868355753
+ alexnet5,0.5026772887553872
+ inception,0.5045220060075747
+ clip_,0.49902050411388277
+ effnet,0.9661921941512774
+ swav,0.6689534720263354
+ pixcorr,0.029649783436161304
+ ssim,0.410797652241365
+ percent_correct_fwd,0.06451612710952759
+ percent_correct_bwd,0.016129031777381897
+ mst_score,[]
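Each final_evals file added in this commit is a two-column CSV (Metric,Value), so the three runs can be compared side by side. A minimal sketch under that assumption; the run labels are just the folder names from the diff, and pandas is already pinned in environment.yml above:

import pandas as pd

runs = [
    "evals/testing_MST_ViT-H_1024/final_evals",
    "evals/testing_MST_ViT-H_1024_scalf/final_evals",
    "evals/testing_MST_ViT-H_1024_scalf_best/final_evals",
]
# One column per run, indexed by metric name (alexnet2 ... mst_score)
table = pd.concat(
    {path.split("/")[1]: pd.read_csv(path, index_col="Metric")["Value"] for path in runs},
    axis=1,
)
print(table)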
evals/testing_MST_ViT-H_1024_scalf_best/testing_MST_ViT-H_1024_scalf_MST_ID.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bb68a3eb40936eedda0b612eac941b1c6666f0286d6637a7dbc29ec4a61e1c8
+ size 11216
evals/testing_MST_ViT-H_1024_scalf_best/testing_MST_ViT-H_1024_scalf_MST_pairmate_indices.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28370fbe1a5e02d265e8e38f19bcd7d1ec6c69da6417b9d462c46ced1a9487d9
+ size 624
final_evaluations.ipynb ADDED
@@ -0,0 +1,1778 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "7d5f265e-407a-40bd-92fb-a652091fd7ea",
7
+ "metadata": {
8
+ "tags": []
9
+ },
10
+ "outputs": [
11
+ {
12
+ "name": "stdout",
13
+ "output_type": "stream",
14
+ "text": [
15
+ "importing modules\n"
16
+ ]
17
+ },
18
+ {
19
+ "name": "stderr",
20
+ "output_type": "stream",
21
+ "text": [
22
+ "Detected kernel version 4.18.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n"
23
+ ]
24
+ },
25
+ {
26
+ "name": "stdout",
27
+ "output_type": "stream",
28
+ "text": [
29
+ "LOCAL RANK 0\n",
30
+ "PID of this process = 1915809\n",
31
+ "device: cuda\n",
32
+ "Distributed environment: DistributedType.NO\n",
33
+ "Num processes: 1\n",
34
+ "Process index: 0\n",
35
+ "Local process index: 0\n",
36
+ "Device: cuda\n",
37
+ "\n",
38
+ "Mixed precision type: fp16\n",
39
+ "\n",
40
+ "distributed = False num_devices = 1 local rank = 0 world size = 1\n"
41
+ ]
42
+ }
43
+ ],
44
+ "source": [
45
+ "print('importing modules')\n",
46
+ "import os\n",
47
+ "import sys\n",
48
+ "import json\n",
49
+ "import argparse\n",
50
+ "import numpy as np\n",
51
+ "import math\n",
52
+ "from einops import rearrange\n",
53
+ "import time\n",
54
+ "import random\n",
55
+ "import string\n",
56
+ "import h5py\n",
57
+ "from tqdm import tqdm\n",
58
+ "import webdataset as wds\n",
59
+ "\n",
60
+ "import matplotlib.pyplot as plt\n",
61
+ "import torch\n",
62
+ "import torch.nn as nn\n",
63
+ "from torchvision import transforms\n",
64
+ "from accelerate import Accelerator, DeepSpeedPlugin\n",
65
+ "\n",
66
+ "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder\n",
67
+ "from models import GNet8_Encoder\n",
68
+ "\n",
69
+ "# tf32 data type is faster than standard float32\n",
70
+ "torch.backends.cuda.matmul.allow_tf32 = True\n",
71
+ "\n",
72
+ "# custom functions #\n",
73
+ "import utils\n",
74
+ "\n",
75
+ "### Multi-GPU config ###\n",
76
+ "local_rank = os.getenv('RANK')\n",
77
+ "if local_rank is None: \n",
78
+ " local_rank = 0\n",
79
+ "else:\n",
80
+ " local_rank = int(local_rank)\n",
81
+ "print(\"LOCAL RANK \", local_rank) \n",
82
+ "\n",
83
+ "accelerator = Accelerator(split_batches=False, mixed_precision=\"fp16\") # ['no', 'fp8', 'fp16', 'bf16']\n",
84
+ "\n",
85
+ "print(\"PID of this process =\",os.getpid())\n",
86
+ "device = accelerator.device\n",
87
+ "print(\"device:\",device)\n",
88
+ "world_size = accelerator.state.num_processes\n",
89
+ "distributed = not accelerator.state.distributed_type == 'NO'\n",
90
+ "num_devices = torch.cuda.device_count()\n",
91
+ "if num_devices==0 or not distributed: num_devices = 1\n",
92
+ "num_workers = num_devices\n",
93
+ "print(accelerator.state)\n",
94
+ "\n",
95
+ "print(\"distributed =\",distributed, \"num_devices =\", num_devices, \"local rank =\", local_rank, \"world size =\", world_size)\n",
96
+ "print = accelerator.print # only print if local_rank=0"
97
+ ]
98
+ },
99
+ {
100
+ "cell_type": "code",
101
+ "execution_count": 2,
102
+ "id": "1b8e8d9e-2931-4546-a2ce-a7417bbe21f4",
103
+ "metadata": {
104
+ "tags": []
105
+ },
106
+ "outputs": [],
107
+ "source": [
108
+ "# Load embedding model (last hidden layer)\n",
109
+ "try:\n",
110
+ " print(clip_img_embedder)\n",
111
+ "except:\n",
112
+ " clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n",
113
+ " arch=\"ViT-bigG-14\",\n",
114
+ " version=\"laion2b_s39b_b160k\",\n",
115
+ " output_tokens=True,\n",
116
+ " only_tokens=True,\n",
117
+ " )\n",
118
+ " clip_img_embedder.to(device)\n",
119
+ "clip_seq_dim = 256\n",
120
+ "clip_emb_dim = 1664\n",
121
+ "\n",
122
+ "## Load embedding model (last layer)\n",
123
+ "# clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n",
124
+ "# arch=\"ViT-bigG-14\",\n",
125
+ "# version=\"laion2b_s39b_b160k\",\n",
126
+ "# output_tokens=False,\n",
127
+ "# only_tokens=False,\n",
128
+ "# )\n",
129
+ "# clip_img_embedder.to(device)\n",
130
+ "# clip_seq_dim = 1\n",
131
+ "# clip_emb_dim = 1280"
132
+ ]
133
+ },
134
+ {
135
+ "cell_type": "code",
136
+ "execution_count": 3,
137
+ "id": "1ffb659a-8154-4536-ab27-2d976da1bf4e",
138
+ "metadata": {
139
+ "tags": []
140
+ },
141
+ "outputs": [
142
+ {
143
+ "name": "stdout",
144
+ "output_type": "stream",
145
+ "text": [
146
+ "model_name: sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0\n",
147
+ "--model_name=sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0 --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --all_recons_path=evals/sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0/sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0_all_recons.pt --eval_dir=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0\n"
148
+ ]
149
+ }
150
+ ],
151
+ "source": [
152
+ "plot_all = False\n",
153
+ "compute_circular = False # for the circular tests looking at image similarity in clip space (without any brain data involved)\n",
154
+ "saving = True\n",
155
+ "\n",
156
+ "# if running this interactively, can specify jupyter_args here for argparser to use\n",
157
+ "if utils.is_interactive():\n",
158
+ " model_name = f\"sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0\"\n",
159
+ " eval_dir = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\"\n",
160
+ " if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n",
161
+ " all_recons_path = f\"{eval_dir}/all_recons.pt\"\n",
162
+ " elif \"paul\" in model_name:\n",
163
+ " all_recons_path = f\"evals/{model_name}/{model_name}_all_recons.pt\"\n",
164
+ " else:\n",
165
+ " all_recons_path = f\"{eval_dir}/{model_name}_all_recons.pt\" \n",
166
+ "\n",
167
+ " data_path = \"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2\"\n",
168
+ " print(\"model_name:\", model_name)\n",
169
+ "\n",
170
+ " jupyter_args = f\"--model_name={model_name} --data_path={data_path} --all_recons_path={all_recons_path} --eval_dir={eval_dir}\"\n",
171
+ " print(jupyter_args)\n",
172
+ " jupyter_args = jupyter_args.split()\n",
173
+ " \n",
174
+ " from IPython.display import clear_output # function to clear print outputs in cell\n",
175
+ " %load_ext autoreload \n",
176
+ " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n",
177
+ " %autoreload 2 "
178
+ ]
179
+ },
180
+ {
181
+ "cell_type": "code",
182
+ "execution_count": 4,
183
+ "id": "fb8120cd-f226-4e2c-a6c5-3cd8ef6e9bc8",
184
+ "metadata": {
185
+ "tags": []
186
+ },
187
+ "outputs": [],
188
+ "source": [
189
+ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n",
190
+ "parser.add_argument(\n",
191
+ " \"--model_name\", type=str, default=\"testing\",\n",
192
+ " help=\"name of model, used for ckpt saving and wandb logging (if enabled)\",\n",
193
+ ")\n",
194
+ "parser.add_argument(\n",
195
+ " \"--data_path\", type=str, default=\"/weka/proj-fmri/shared/mindeyev2_dataset\",\n",
196
+ " help=\"Path to where NSD data is stored / where to download it to\",\n",
197
+ ")\n",
198
+ "parser.add_argument(\n",
199
+ " \"--all_recons_path\", type=str,\n",
200
+ " help=\"Path to where all_recons.pt is stored\",\n",
201
+ ")\n",
202
+ "\n",
203
+ "parser.add_argument(\n",
204
+ " \"--eval_dir\", type=str,\n",
205
+ " help=\"Path to where evaluations should be stored\",\n",
206
+ ")\n",
207
+ "\n",
208
+ "parser.add_argument(\n",
209
+ " \"--seed\",type=int,default=42,\n",
210
+ ")\n",
211
+ "if utils.is_interactive():\n",
212
+ " args = parser.parse_args(jupyter_args)\n",
213
+ "else:\n",
214
+ " args = parser.parse_args()\n",
215
+ "\n",
216
+ "# create global variables without the args prefix\n",
217
+ "for attribute_name in vars(args).keys():\n",
218
+ " globals()[attribute_name] = getattr(args, attribute_name)\n",
219
+ " \n",
220
+ "# seed all random functions\n",
221
+ "utils.seed_everything(seed)"
222
+ ]
223
+ },
224
+ {
225
+ "cell_type": "markdown",
226
+ "id": "95d66b33-b327-4895-a861-ecc6ccc51296",
227
+ "metadata": {
228
+ "tags": []
229
+ },
230
+ "source": [
231
+ "# Evals"
232
+ ]
233
+ },
234
+ {
235
+ "cell_type": "code",
236
+ "execution_count": 5,
237
+ "id": "be66f9c9-f25a-48d9-9e9a-272ab33d20ed",
238
+ "metadata": {
239
+ "tags": []
240
+ },
241
+ "outputs": [
242
+ {
243
+ "name": "stdout",
244
+ "output_type": "stream",
245
+ "text": [
246
+ "torch.Size([100, 3, 256, 256])\n",
247
+ "all_recons_path: evals/sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0/sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0_all_recons.pt\n"
248
+ ]
249
+ }
250
+ ],
251
+ "source": [
252
+ "if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n",
253
+ " all_images = torch.load(f\"{eval_dir}/all_images.pt\")\n",
254
+ " all_clipvoxels = torch.load(f\"{eval_dir}/all_clipvoxels.pt\")\n",
255
+ " all_predcaptions = torch.load(f\"{eval_dir}/all_predcaptions.pt\")\n",
256
+ " all_unrefinedrecons = torch.load(f\"{eval_dir}/all_recons.pt\")\n",
257
+ "elif \"ses-01\" in model_name and \"paul\" in model_name:\n",
258
+ " all_images = torch.load(f\"evals/{model_name}/{model_name}_all_images.pt\")\n",
259
+ " all_clipvoxels = torch.load(f\"evals/{model_name}/{model_name}_all_clipvoxels.pt\")\n",
260
+ " all_predcaptions = torch.load(f\"evals/{model_name}/{model_name}_all_predcaptions.pt\")\n",
261
+ " all_unrefinedrecons = torch.load(f\"evals/{model_name}/{model_name}_all_recons.pt\")\n",
262
+ "else:\n",
263
+ " all_images = torch.load(f\"{eval_dir}/{model_name}_all_images.pt\") \n",
264
+ " all_clipvoxels = torch.load(f\"{eval_dir}/{model_name}_all_clipvoxels.pt\") \n",
265
+ " all_predcaptions = torch.load(f\"{eval_dir}/{model_name}_all_predcaptions.pt\") \n",
266
+ " all_unrefinedrecons = torch.load(f\"{eval_dir}/{model_name}_all_recons.pt\") \n",
267
+ "\n",
268
+ "print(all_images.shape)\n",
269
+ "print(\"all_recons_path:\", all_recons_path)\n",
270
+ "all_recons = torch.load(all_recons_path)\n",
271
+ "\n",
272
+ "# all_blurryrecons = torch.load(f\"{eval_dir}/all_blurryrecons.pt\")"
273
+ ]
274
+ },
275
+ {
276
+ "cell_type": "code",
277
+ "execution_count": 6,
278
+ "id": "4b6c44f9-95ac-4bac-9fbf-d0b31f4f127f",
279
+ "metadata": {},
280
+ "outputs": [],
281
+ "source": [
282
+ "# if \"ses-01\" in model_name:\n",
283
+ "# paul_all_images = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_images.pt\").to('cpu')\n",
284
+ "# paul_all_clipvoxels = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_clipvoxels.pt\").to('cpu')\n",
285
+ "# paul_all_recons = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_recons.pt\").to('cpu')\n",
286
+ "# # paul_all_prior_out = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_prior_out.pt\").to('cpu')\n",
287
+ "# # all_images = torch.load(f\"{eval_dir}/all_images.pt\") \n",
288
+ "# print(paul_all_images.shape, all_images.shape)\n",
289
+ "# print(paul_all_clipvoxels.shape, all_clipvoxels.shape)\n",
290
+ "# print(torch.eq(paul_all_clipvoxels, all_clipvoxels))\n",
291
+ "# # assert torch.allclose(paul_all_images, all_images)"
292
+ ]
293
+ },
294
+ {
295
+ "cell_type": "code",
296
+ "execution_count": 7,
297
+ "id": "e8f29ac6-6561-4770-8837-8877488dce05",
298
+ "metadata": {
299
+ "tags": []
300
+ },
301
+ "outputs": [],
302
+ "source": [
303
+ "# for i in range(100):\n",
304
+ "# # print(torch.allclose(paul_all_images[i], all_images[i]))\n",
305
+ "# pass"
306
+ ]
307
+ },
308
+ {
309
+ "cell_type": "code",
310
+ "execution_count": 8,
311
+ "id": "a1382548-4b43-4101-a15b-e67b1225059c",
312
+ "metadata": {},
313
+ "outputs": [],
314
+ "source": [
315
+ "# num_images = paul_all_images.size(0)\n",
316
+ "# rows = 10 # Number of rows for the grid\n",
317
+ "# cols = 10 # Number of columns for the grid\n",
318
+ "\n",
319
+ "# fig, axes = plt.subplots(rows, cols * 2, figsize=(80, 40))\n",
320
+ "\n",
321
+ "# for i in range(num_images):\n",
322
+ "# row = i // cols\n",
323
+ "# col = (i % cols) * 2 # Adjust for side-by-side\n",
324
+ " \n",
325
+ "# # Plot correct image\n",
326
+ "# ax_correct = axes[row, col]\n",
327
+ "# ax_correct.imshow(paul_all_recons[i].permute(1, 2, 0).cpu().numpy())\n",
328
+ "# ax_correct.axis('off')\n",
329
+ "# ax_correct.set_title(f\"Correct {i}\")\n",
330
+ " \n",
331
+ "# # Plot modified image\n",
332
+ "# ax_modified = axes[row, col + 1]\n",
333
+ "# ax_modified.imshow(all_recons[i].permute(1, 2, 0).cpu().numpy())\n",
334
+ "# ax_modified.axis('off')\n",
335
+ "# ax_modified.set_title(f\"Modified {i}\")\n",
336
+ "\n",
337
+ "# plt.tight_layout()\n",
338
+ "# plt.show()"
339
+ ]
340
+ },
341
+ {
342
+ "cell_type": "code",
343
+ "execution_count": 9,
344
+ "id": "4cc551db-85c3-4696-a6fd-52b0092669eb",
345
+ "metadata": {
346
+ "tags": []
347
+ },
348
+ "outputs": [
349
+ {
350
+ "name": "stdout",
351
+ "output_type": "stream",
352
+ "text": [
353
+ "sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0_all_recons.pt\n",
354
+ "torch.Size([100, 3, 256, 256]) torch.Size([100, 3, 256, 256])\n"
355
+ ]
356
+ }
357
+ ],
358
+ "source": [
359
+ "model_name_plus_suffix = all_recons_path.split('/')[-1]\n",
360
+ "print(model_name_plus_suffix)\n",
361
+ "print(all_images.shape, all_recons.shape)"
362
+ ]
363
+ },
364
+ {
365
+ "cell_type": "code",
366
+ "execution_count": 10,
367
+ "id": "22e933c4-1eed-48a7-a22f-84a4606ac253",
368
+ "metadata": {},
369
+ "outputs": [
370
+ {
371
+ "name": "stdout",
372
+ "output_type": "stream",
373
+ "text": [
374
+ "sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0 (50, 2) torch.Size([100, 3, 256, 256]) torch.Size([100, 256, 1664])\n"
375
+ ]
376
+ }
377
+ ],
378
+ "source": [
379
+ "if \"MST\" in model_name:\n",
380
+ " if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name or \"rishab\" in model_name:\n",
381
+ " MST_ID = np.load(f\"{eval_dir}/MST_ID.npy\")\n",
382
+ " MST_pairmate_indices = np.load(f\"{eval_dir}/MST_pairmate_indices.npy\")\n",
383
+ " elif \"paul\" in model_name:\n",
384
+ " MST_ID = np.load(f\"evals/{model_name}/{model_name}_MST_ID.npy\")\n",
385
+ " MST_pairmate_indices = np.array(utils.find_paired_indices(torch.Tensor(MST_ID)))\n",
386
+ " # print(MST_pairmate_indices)\n",
387
+ " else:\n",
388
+ " MST_ID = np.load(f\"{eval_dir}/{model_name}_MST_ID.npy\") \n",
389
+ " MST_pairmate_indices = np.load(f\"{eval_dir}/{model_name}_MST_pairmate_indices.npy\") \n",
390
+ "\n",
391
+ " # pairs = utils.find_paired_indices(torch.Tensor(MST_ID))\n",
392
+ " # if \"close_to_MST\" in model_name or (\"remove\" in model_name and \"random\" in model_name) or \"ses-0\" in model_name:\n",
393
+ " # pairs = np.array(pairs[:-1]) # index out the placeholder\n",
394
+ " # pairs = np.array(pairs)\n",
395
+ " # if \"ses-0\" in model_name:\n",
396
+ " # if \"ses-01\" in model_name or \"ses-04\" in model_name:\n",
397
+ " # print(pairs.shape)\n",
398
+ " # assert pairs.shape == (49,2)\n",
399
+ " # else:\n",
400
+ " # assert pairs.shape == (50,3)\n",
401
+ " # else:\n",
402
+ " # assert pairs.shape == (100,3)\n",
403
+ " # print(pairs)\n",
404
+ " # repeats_in_test = torch.load(f\"{eval_dir}/repeats_in_test.pt\")\n",
405
+ " # test_image_indices = torch.load(f\"{eval_dir}/test_image_indices.pt\")\n",
406
+ " all_unique_images = all_images[MST_pairmate_indices.flatten()]\n",
407
+ " all_unique_clipvoxels = all_clipvoxels[MST_pairmate_indices.flatten()]\n",
408
+ "\n",
409
+ " print(model_name, MST_pairmate_indices.shape, all_unique_images.shape, all_unique_clipvoxels.shape)"
410
+ ]
411
+ },
412
+ {
413
+ "cell_type": "code",
414
+ "execution_count": 11,
415
+ "id": "880081e4-7567-4b1e-acb0-4af863018228",
416
+ "metadata": {
417
+ "tags": []
418
+ },
419
+ "outputs": [],
420
+ "source": [
421
+ "# visualize all unique images\n",
422
+ "if plot_all:\n",
423
+ " # Plot all the MST images and pairmates\n",
424
+ " import textwrap\n",
425
+ " def wrap_title(title, wrap_width):\n",
426
+ " return \"\\n\".join(textwrap.wrap(title, wrap_width))\n",
427
+ "\n",
428
+ " size = int(np.ceil(MST_pairmate_indices.shape[0]/2)) # helps determine size of plot\n",
429
+ " fig, axes = plt.subplots(size, 4, figsize=(15, size*4))\n",
430
+ " jj=-1; kk=0;\n",
431
+ " for i, j in enumerate(all_unique_images):\n",
432
+ " jj+=1\n",
433
+ " axes[kk][jj].imshow(utils.torch_to_Image(j))\n",
434
+ " axes[kk][jj].axis('off')\n",
435
+ " if jj==3: \n",
436
+ " kk+=1; jj=-1\n",
437
+ "\n",
438
+ " fig.tight_layout()\n",
439
+ " # plt.savefig('figures/MST_2_pairmates_10-01')\n",
440
+ " plt.show()"
441
+ ]
442
+ },
443
+ {
444
+ "cell_type": "code",
445
+ "execution_count": 12,
446
+ "id": "48c8772b-871a-4031-b013-d7159bf8b74a",
447
+ "metadata": {
448
+ "tags": []
449
+ },
450
+ "outputs": [],
451
+ "source": [
452
+ "# if plot_all:\n",
453
+ "# # create full grid of recon comparisons\n",
454
+ "# from PIL import Image\n",
455
+ "\n",
456
+ "# imsize = 150\n",
457
+ "# if all_images.shape[-1] != imsize:\n",
458
+ "# all_images = transforms.Resize((imsize,imsize))(all_images).float()\n",
459
+ "# if all_recons.shape[-1] != imsize:\n",
460
+ "# all_recons = transforms.Resize((imsize,imsize))(all_recons).float()\n",
461
+ "\n",
462
+ "# num_images = all_recons.shape[0]\n",
463
+ "# num_rows = (2 * num_images + 9) // 10\n",
464
+ "\n",
465
+ "# # Interleave tensors\n",
466
+ "# merged = torch.stack([val for pair in zip(all_images, all_recons) for val in pair], dim=0)\n",
467
+ "\n",
468
+ "# # Calculate grid size\n",
469
+ "# grid = torch.zeros((num_rows * 10, 3, all_recons.shape[-1], all_recons.shape[-1]))\n",
470
+ "\n",
471
+ "# # Populate the grid\n",
472
+ "# grid[:2*num_images] = merged\n",
473
+ "# grid_images = [transforms.functional.to_pil_image(grid[i]) for i in range(num_rows * 10)]\n",
474
+ "\n",
475
+ "# # Create the grid image\n",
476
+ "# grid_image = Image.new('RGB', (all_recons.shape[-1]*10, all_recons.shape[-1] * num_rows)) # 10 images wide\n",
477
+ "\n",
478
+ "# # Paste images into the grid\n",
479
+ "# for i, img in enumerate(grid_images):\n",
480
+ "# grid_image.paste(img, (all_recons.shape[-1] * (i % 10), all_recons.shape[-1] * (i // 10)))\n",
481
+ "# grid_image\n",
482
+ "# # grid_image.save(f\"{model_name_plus_suffix[:-3]}_1000recons.png\")"
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": 13,
488
+ "id": "f42009e9-f910-4f02-8db6-d46778aa6595",
489
+ "metadata": {
490
+ "tags": []
491
+ },
492
+ "outputs": [],
493
+ "source": [
494
+ "imsize = 256\n",
495
+ "if all_images.shape[-1] != imsize:\n",
496
+ " all_images = transforms.Resize((imsize,imsize))(all_images).float()\n",
497
+ "if all_recons.shape[-1] != imsize:\n",
498
+ " all_recons = transforms.Resize((imsize,imsize))(all_recons).float()\n",
499
+ "try:\n",
500
+ " if all_blurryrecons.shape[-1] != imsize:\n",
501
+ " all_blurryrecons = transforms.Resize((imsize,imsize))(all_blurryrecons).float()\n",
502
+ "except:\n",
503
+ " pass\n",
504
+ "\n",
505
+ "if \"enhanced\" in model_name_plus_suffix:\n",
506
+ " try:\n",
507
+ " all_recons = all_recons*.75 + all_blurryrecons*.25\n",
508
+ " print(\"weighted averaging to improve low-level evals\")\n",
509
+ " except:\n",
510
+ " pass"
511
+ ]
512
+ },
513
+ {
514
+ "cell_type": "code",
515
+ "execution_count": 14,
516
+ "id": "06e23174-a777-4dd1-8b73-6783587d8f9c",
517
+ "metadata": {
518
+ "tags": []
519
+ },
520
+ "outputs": [],
521
+ "source": [
522
+ "# visualize some images with recons and captions\n",
523
+ "if plot_all:\n",
524
+ " assert np.all(all_images.shape == all_recons.shape)\n",
525
+ " import textwrap\n",
526
+ " def wrap_title(title, wrap_width):\n",
527
+ " return \"\\n\".join(textwrap.wrap(title, wrap_width))\n",
528
+ "\n",
529
+ " fig, axes = plt.subplots(3, 4, figsize=(10, 8))\n",
530
+ " jj=-1; kk=0;\n",
531
+ " for j in np.array([0,1,2,3,4,5]):\n",
532
+ " jj+=1\n",
533
+ " # print(kk,jj)\n",
534
+ " axes[kk][jj].imshow(utils.torch_to_Image(all_images[j]))\n",
535
+ " axes[kk][jj].axis('off')\n",
536
+ " jj+=1\n",
537
+ " axes[kk][jj].imshow(utils.torch_to_Image(all_recons[j]))\n",
538
+ " axes[kk][jj].axis('off')\n",
539
+ " axes[kk][jj].set_title(wrap_title(str(all_predcaptions[[j]]),wrap_width=30), fontsize=8)\n",
540
+ " if jj==3: \n",
541
+ " kk+=1; jj=-1\n",
542
+ "\n",
543
+ " fig.tight_layout()\n",
544
+ " # plt.savefig('figures/recon_09-26')\n",
545
+ " plt.show()"
546
+ ]
547
+ },
548
+ {
549
+ "cell_type": "markdown",
550
+ "id": "5b4deb53-4d85-4292-92c5-bb59077523cf",
551
+ "metadata": {},
552
+ "source": [
553
+ "# Retrieval eval (chance = 1/100)"
554
+ ]
555
+ },
556
+ {
557
+ "cell_type": "code",
558
+ "execution_count": 15,
559
+ "id": "2e49d57a-9b65-490c-a1e0-61bd05682171",
560
+ "metadata": {
561
+ "tags": []
562
+ },
563
+ "outputs": [
564
+ {
565
+ "name": "stdout",
566
+ "output_type": "stream",
567
+ "text": [
568
+ "overall fwd percent_correct: 0.7100\n",
569
+ "overall bwd percent_correct: 0.7000\n"
570
+ ]
571
+ }
572
+ ],
573
+ "source": [
574
+ "from scipy import stats\n",
575
+ "\n",
576
+ "all_fwd_acc = []\n",
577
+ "all_bwd_acc = []\n",
578
+ "\n",
579
+ "assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
580
+ "\n",
581
+ "all_percent_correct_fwds, all_percent_correct_bwds = [], []\n",
582
+ "\n",
583
+ "with torch.cuda.amp.autocast(dtype=torch.float16):\n",
584
+ " all_emb = clip_img_embedder(all_unique_images.to(device)).float() # CLIP-Image\n",
585
+ " all_emb_ = all_unique_clipvoxels # CLIP-Brain\n",
586
+ "\n",
587
+ " # flatten if necessary\n",
588
+ " all_emb = all_emb.reshape(len(all_emb),-1).to(device)\n",
589
+ " all_emb_ = all_emb_.reshape(len(all_emb_),-1).to(device)\n",
590
+ "\n",
591
+ " # l2norm \n",
592
+ " all_emb = nn.functional.normalize(all_emb,dim=-1)\n",
593
+ " all_emb_ = nn.functional.normalize(all_emb_,dim=-1)\n",
594
+ "\n",
595
+ " all_labels = torch.arange(len(all_emb)).to(device)\n",
596
+ " all_bwd_sim = utils.batchwise_cosine_similarity(all_emb,all_emb_) # clip, brain\n",
597
+ " all_fwd_sim = utils.batchwise_cosine_similarity(all_emb_,all_emb) # brain, clip\n",
598
+ "\n",
599
+ " if \"ses-0\" not in model_name or \"ses-01\" in model_name or \"ses-04\" in model_name:\n",
600
+ " assert len(all_fwd_sim) == 100\n",
601
+ " assert len(all_bwd_sim) == 100\n",
602
+ " else:\n",
603
+ " assert len(all_fwd_sim) == 50\n",
604
+ " assert len(all_bwd_sim) == 50\n",
605
+ " \n",
606
+ " all_percent_correct_fwds = utils.topk(all_fwd_sim, all_labels, k=1).item()\n",
607
+ " all_percent_correct_bwds = utils.topk(all_bwd_sim, all_labels, k=1).item()\n",
608
+ "\n",
609
+ "all_fwd_acc.append(all_percent_correct_fwds)\n",
610
+ "all_bwd_acc.append(all_percent_correct_bwds)\n",
611
+ "\n",
612
+ "all_fwd_sim = np.array(all_fwd_sim.cpu())\n",
613
+ "all_bwd_sim = np.array(all_bwd_sim.cpu())\n",
614
+ "\n",
615
+ "print(f\"overall fwd percent_correct: {all_fwd_acc[0]:.4f}\")\n",
616
+ "print(f\"overall bwd percent_correct: {all_bwd_acc[0]:.4f}\")"
617
+ ]
618
+ },
619
+ {
620
+ "cell_type": "code",
621
+ "execution_count": 16,
622
+ "id": "1a4cda50-ef4a-43d0-9c2e-53ee8509482d",
623
+ "metadata": {
624
+ "tags": []
625
+ },
626
+ "outputs": [],
627
+ "source": [
628
+ "if \"ses-0\" not in model_name:\n",
629
+ " from scipy import stats\n",
630
+ "\n",
631
+ " fwd_acc = []\n",
632
+ " bwd_acc = []\n",
633
+ " fwd_sim_halves = []\n",
634
+ " bwd_sim_halves = []\n",
635
+ "\n",
636
+ " assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
637
+ "\n",
638
+ " for i in range(2): # since this is a 2-session model, we evaluate on the test set corresponding to each session and report both separately for better comparison to single-session models\n",
639
+ " percent_correct_fwds, percent_correct_bwds = [], []\n",
640
+ " # percent_correct_fwd, percent_correct_bwd = None, None\n",
641
+ "\n",
642
+ " if i==0: \n",
643
+ " all_unique_images_half = all_unique_images[:int(len(all_unique_images)/2)]\n",
644
+ " all_unique_clipvoxels_half = all_unique_clipvoxels[:int(len(all_unique_clipvoxels)/2)]\n",
645
+ " elif i==1:\n",
646
+ " all_unique_images_half = all_unique_images[int(len(all_unique_images)/2):]\n",
647
+ " all_unique_clipvoxels_half = all_unique_clipvoxels[int(len(all_unique_clipvoxels)/2):]\n",
648
+ "\n",
649
+ "\n",
650
+ " with torch.cuda.amp.autocast(dtype=torch.float16):\n",
651
+ " emb = clip_img_embedder(all_unique_images_half.to(device)).float() # CLIP-Image\n",
652
+ " emb_ = all_unique_clipvoxels_half # CLIP-Brain\n",
653
+ "\n",
654
+ " # flatten if necessary\n",
655
+ " emb = emb.reshape(len(emb),-1).to(device)\n",
656
+ " emb_ = emb_.reshape(len(emb_),-1).to(device)\n",
657
+ "\n",
658
+ " # l2norm \n",
659
+ " emb = nn.functional.normalize(emb,dim=-1)\n",
660
+ " emb_ = nn.functional.normalize(emb_,dim=-1)\n",
661
+ "\n",
662
+ " labels = torch.arange(len(emb)).to(device)\n",
663
+ " bwd_sim = utils.batchwise_cosine_similarity(emb,emb_) # clip, brain\n",
664
+ " fwd_sim = utils.batchwise_cosine_similarity(emb_,emb) # brain, clip\n",
665
+ "\n",
666
+ " assert len(fwd_sim) == 50\n",
667
+ " assert len(bwd_sim) == 50\n",
668
+ "\n",
669
+ " # percent_correct_fwds = np.append(percent_correct_fwds, utils.topk(fwd_sim, labels, k=1).item())\n",
670
+ " # percent_correct_bwds = np.append(percent_correct_bwds, utils.topk(bwd_sim, labels, k=1).item())\n",
671
+ " percent_correct_fwds = utils.topk(fwd_sim, labels, k=1).item()\n",
672
+ " percent_correct_bwds = utils.topk(bwd_sim, labels, k=1).item()\n",
673
+ "\n",
674
+ " # percent_correct_fwd = np.mean(percent_correct_fwds)\n",
675
+ " # fwd_sd = np.std(percent_correct_fwds) / np.sqrt(len(percent_correct_fwds))\n",
676
+ " # fwd_ci = stats.norm.interval(0.95, loc=percent_correct_fwd, scale=fwd_sd)\n",
677
+ "\n",
678
+ " # percent_correct_bwd = np.mean(percent_correct_bwds)\n",
679
+ " # bwd_sd = np.std(percent_correct_bwds) / np.sqrt(len(percent_correct_bwds))\n",
680
+ " # bwd_ci = stats.norm.interval(0.95, loc=percent_correct_bwd, scale=bwd_sd)\n",
681
+ "\n",
682
+ " fwd_acc.append(percent_correct_fwds)\n",
683
+ " bwd_acc.append(percent_correct_bwds)\n",
684
+ "\n",
685
+ " fwd_sim = np.array(fwd_sim.cpu())\n",
686
+ " bwd_sim = np.array(bwd_sim.cpu())\n",
687
+ " fwd_sim_halves.append(fwd_sim)\n",
688
+ " bwd_sim_halves.append(bwd_sim)\n",
689
+ "\n",
690
+ " print(f\"ses-02 fwd percent_correct: {fwd_acc[0]:.4f}; ses-03 fwd percent_correct: {fwd_acc[1]:.4f}\")\n",
691
+ " print(f\"ses-02 bwd percent_correct: {bwd_acc[0]:.4f}; ses-03 bwd percent_correct: {bwd_acc[1]:.4f} \")"
692
+ ]
693
+ },
694
+ {
695
+ "cell_type": "code",
696
+ "execution_count": 17,
697
+ "id": "40d43a70-e1db-43e5-ae84-40eb1577cf01",
698
+ "metadata": {
699
+ "tags": []
700
+ },
701
+ "outputs": [],
702
+ "source": [
703
+ "if compute_circular:\n",
704
+ " if \"ses-0\" not in model_name: # we're in a multisession model, assumed ses-02 and ses-03 for now\n",
705
+ " fwd_acc_circular = []\n",
706
+ " fwd_sim_halves_circular = []\n",
707
+ "\n",
708
+ " assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
709
+ "\n",
710
+ " for i in range(2): # since this is a 2-session model, we evaluate on the test set corresponding to each session and report both separately for better comparison to single-session models\n",
711
+ " percent_correct_fwds_circular = []\n",
712
+ " # percent_correct_fwd_circular = None\n",
713
+ "\n",
714
+ " if i==0: \n",
715
+ " all_unique_images_half_circular = all_unique_images[:int(len(all_unique_images)/2)]\n",
716
+ " all_unique_clipvoxels_half_circular = all_unique_clipvoxels[:int(len(all_unique_clipvoxels)/2)]\n",
717
+ " elif i==1:\n",
718
+ " all_unique_images_half_circular = all_unique_images[int(len(all_unique_images)/2):]\n",
719
+ " all_unique_clipvoxels_half_circular = all_unique_clipvoxels[int(len(all_unique_clipvoxels)/2):]\n",
720
+ "\n",
721
+ " with torch.cuda.amp.autocast(dtype=torch.float16):\n",
722
+ " emb_circular = clip_img_embedder(all_unique_images_half_circular.to(device)).float() # CLIP-Image\n",
723
+ "\n",
724
+ " # flatten if necessary\n",
725
+ " emb_circular = emb_circular.reshape(len(emb_circular),-1).to(device)\n",
726
+ "\n",
727
+ " # l2norm \n",
728
+ " emb_circular = nn.functional.normalize(emb_circular,dim=-1)\n",
729
+ "\n",
730
+ " labels_circular = torch.arange(len(emb_circular)).to(device)\n",
731
+ " fwd_sim_circular = utils.batchwise_cosine_similarity(emb_circular,emb_circular) # clip, clip\n",
732
+ "\n",
733
+ "\n",
734
+ " if \"ses-0\" in model_name:\n",
735
+ " assert len(fwd_sim_circular) == 25\n",
736
+ " else:\n",
737
+ " assert len(fwd_sim_circular) == 50\n",
738
+ "\n",
739
+ " # percent_correct_fwds_circular = np.append(percent_correct_fwds_circular, utils.topk(fwd_sim_circular, labels_circular, k=1).item())\n",
740
+ " percent_correct_fwds_circular = utils.topk(fwd_sim_circular, labels_circular, k=1).item()\n",
741
+ "\n",
742
+ "\n",
743
+ " # percent_correct_fwd_circular = np.mean(percent_correct_fwds_circular)\n",
744
+ " # fwd_sd_circular = np.std(percent_correct_fwds_circular) / np.sqrt(len(percent_correct_fwds_circular))\n",
745
+ " # fwd_ci_circular = stats.norm.interval(0.95, loc=percent_correct_fwd_circular, scale=fwd_sd_circular)\n",
746
+ "\n",
747
+ " fwd_acc_circular.append(percent_correct_fwds_circular)\n",
748
+ "\n",
749
+ " fwd_sim_circular = np.array(fwd_sim_circular.cpu())\n",
750
+ " fwd_sim_halves_circular.append(fwd_sim_circular)\n",
751
+ "\n",
752
+ " print(f\"ses-02 fwd percent_correct: {fwd_acc_circular[0]:.4f}; ses-03 fwd percent_correct: {fwd_acc_circular[1]:.4f}\")\n",
753
+ " \n",
754
+ " else: # single session model\n",
755
+ " fwd_acc_circular = []\n",
756
+ "\n",
757
+ " assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
758
+ "\n",
759
+ " percent_correct_fwds_circular = []\n",
760
+ " # percent_correct_fwd_circular = None\n",
761
+ "\n",
762
+ " with torch.cuda.amp.autocast(dtype=torch.float16):\n",
763
+ " emb_circular = clip_img_embedder(all_unique_images.to(device)).float() # CLIP-Image\n",
764
+ "\n",
765
+ " # flatten if necessary\n",
766
+ " emb_circular = emb_circular.reshape(len(emb_circular),-1).to(device)\n",
767
+ "\n",
768
+ " # l2norm \n",
769
+ " emb_circular = nn.functional.normalize(emb_circular,dim=-1)\n",
770
+ "\n",
771
+ " labels_circular = torch.arange(len(emb_circular)).to(device)\n",
772
+ " fwd_sim_circular = utils.batchwise_cosine_similarity(emb_circular,emb_circular) # clip, clip\n",
773
+ "\n",
774
+ "\n",
775
+ " if \"ses-01\" in model_name:\n",
776
+ " assert len(fwd_sim_circular) == 100\n",
777
+ " else:\n",
778
+ " assert len(fwd_sim_circular) == 50\n",
779
+ "\n",
780
+ " # percent_correct_fwds_circular = np.append(percent_correct_fwds_circular, utils.topk(fwd_sim_circular, labels_circular, k=1).item())\n",
781
+ " percent_correct_fwds_circular = utils.topk(fwd_sim_circular, labels_circular, k=1).item()\n",
782
+ "\n",
783
+ "\n",
784
+ " # percent_correct_fwd_circular = np.mean(percent_correct_fwds_circular)\n",
785
+ " # fwd_sd_circular = np.std(percent_correct_fwds_circular) / np.sqrt(len(percent_correct_fwds_circular))\n",
786
+ " # fwd_ci_circular = stats.norm.interval(0.95, loc=percent_correct_fwd_circular, scale=fwd_sd_circular)\n",
787
+ "\n",
788
+ " fwd_acc_circular.append(percent_correct_fwds_circular)\n",
789
+ "\n",
790
+ " fwd_sim_circular = np.array(fwd_sim_circular.cpu())\n",
791
+ "\n",
792
+ " print(f\"session fwd percent_correct (circular): {fwd_acc_circular[0]:.4f}\")"
793
+ ]
794
+ },
795
+ {
796
+ "cell_type": "code",
797
+ "execution_count": 18,
798
+ "id": "d086c99a-c712-4ac7-b625-a0656c9ee886",
799
+ "metadata": {
800
+ "tags": []
801
+ },
802
+ "outputs": [],
803
+ "source": [
804
+ "if compute_circular:\n",
805
+ " # print(utils.topk(torch.Tensor(fwd_sim_halves[1]).to(device), labels, k=1).item())\n",
806
+ " # ses02_top1 = torch.argsort(torch.Tensor(fwd_sim_halves[0]).to(device),axis=1)[:,-1] == labels # from utils.topk()\n",
807
+ " # ses03_top1 = torch.argsort(torch.Tensor(fwd_sim_halves[1]).to(device),axis=1)[:,-1] == labels\n",
808
+ " # top1_results = torch.cat((ses02_top1, ses03_top1))\n",
809
+ " # incorrect_idx = torch.argwhere(top1_results == False)[:,0]\n",
810
+ " # print(incorrect_idx)\n",
811
+ "\n",
812
+ " # confirm that the lure is behind the target 80% of the time in CLIP last hidden layer embeddings\n",
813
+ "\n",
814
+ " use_fwd_sim = False\n",
815
+ " use_first_half = False\n",
816
+ "\n",
817
+ " all_top_sims = [] # len(fwd_sim_halves[0]); for each image, contains the similarity to the top-n choices until it gets the correct answer. If len 1, top-1 is correct\n",
818
+ " all_pairmate_sims = [] # len(fwd_sim_halves[0]); the similarity of each image to its pairmate \n",
819
+ " all_chose_lures = [] # len(fwd_sim_halves[0]); True for each top-n choice if the lure was predicted to be more similar to the target \n",
820
+ " if \"ses-0\" not in model_name:\n",
821
+ " sim_halves = fwd_sim_halves if use_fwd_sim else bwd_sim_halves\n",
822
+ " sim_halves = sim_halves[0] if use_first_half else sim_halves[1]\n",
823
+ " else:\n",
824
+ " sim_halves = all_fwd_sim if use_fwd_sim else all_bwd_sim\n",
825
+ " for i, img in enumerate(sim_halves):\n",
826
+ " if i%2==0:\n",
827
+ " idx_to_pairmate = 1\n",
828
+ " elif i%2==1:\n",
829
+ " idx_to_pairmate = -1\n",
830
+ "\n",
831
+ " order = img.argsort()[::-1]\n",
832
+ " # print(order)\n",
833
+ " top_sim = []\n",
834
+ " chose_lure = []\n",
835
+ " for idx in order:\n",
836
+ " sim = img[idx]\n",
837
+ " pairmate_sim = img[i+idx_to_pairmate]\n",
838
+ " top_sim.append(sim) \n",
839
+ " chose_lure.append((idx, sim <= pairmate_sim))\n",
840
+ " # print(i, idx, img[idx], img[i+idx_to_pairmate])\n",
841
+ " if idx == i:\n",
842
+ " break\n",
843
+ "\n",
844
+ " all_top_sims.append(top_sim)\n",
845
+ " all_pairmate_sims.append(pairmate_sim)\n",
846
+ " all_chose_lures.append(chose_lure)\n",
847
+ "\n",
848
+ " # print(all_top_sims)\n",
849
+ " # print()\n",
850
+ " # print(all_pairmate_sims)\n",
851
+ " # print()\n",
852
+ " # print(all_chose_lures)\n",
853
+ "\n",
854
+ " where_chose_pairmate = []\n",
855
+ " for idx, i in enumerate(all_chose_lures):\n",
856
+ " for value in i:\n",
857
+ " # print(value[1])\n",
858
+ " if value[1] == True:\n",
859
+ " # print(idx, i, end='\\n')\n",
860
+ " where_chose_pairmate.append(idx)\n",
861
+ " break\n",
862
+ "\n",
863
+ " # where_chose_pairmate # trials where the pairmate was chosen ahead of the target"
864
+ ]
865
+ },
866
+ {
867
+ "cell_type": "code",
868
+ "execution_count": 19,
869
+ "id": "a5c4405c-b3b7-42fe-b622-f4cb81500464",
870
+ "metadata": {
871
+ "tags": []
872
+ },
873
+ "outputs": [],
874
+ "source": [
875
+ "# top-n predictions using CLIP brain embeddings\n",
876
+ "\n",
877
+ "if plot_all:\n",
878
+ " use_fwd_sim = True\n",
879
+ " top_n = 10 # how many of the top n images to display\n",
880
+ " print(\"Given Brain embedding, find correct Image embedding\")\n",
881
+ " fig, ax = plt.subplots(nrows=len(all_unique_images), ncols=top_n+1, figsize=(top_n*2,len(all_unique_images)*2))\n",
882
+ " for trial in range(len(all_unique_images)):\n",
883
+ " ax[trial, 0].imshow(utils.torch_to_Image(all_unique_images[trial]))\n",
884
+ " ax[trial, 0].set_title(\"original\\nimage\")\n",
885
+ " ax[trial, 0].axis(\"off\")\n",
886
+ " for attempt in range(top_n):\n",
887
+ " if trial < 50:\n",
888
+ " if \"ses-0\" not in model_name:\n",
889
+ " sim_half = fwd_sim_halves[0] if use_fwd_sim else bwd_sim_halves[0]\n",
890
+ " unique_imgs_to_plot = all_unique_images[:int(len(all_unique_images)/2)]\n",
891
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels[:int(len(all_unique_clipvoxels)/2)]\n",
892
+ " else:\n",
893
+ " sim_half = all_fwd_sim if use_fwd_sim else all_bwd_sim\n",
894
+ " unique_imgs_to_plot = all_unique_images\n",
895
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels\n",
896
+ " which = np.flip(np.argsort(sim_half[trial]))[attempt]\n",
897
+ "\n",
898
+ " elif trial >= 50:\n",
899
+ " if \"ses-0\" not in model_name:\n",
900
+ " sim_halves = fwd_sim_halves[1] if use_fwd_sim else bwd_sim_halves[1]\n",
901
+ " unique_imgs_to_plot = all_unique_images[int(len(all_unique_images)/2):]\n",
902
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels[int(len(all_unique_clipvoxels)/2):]\n",
903
+ " else:\n",
904
+ " sim_halves = all_fwd_sim if use_fwd_sim else all_bwd_sim\n",
905
+ " unique_imgs_to_plot = all_unique_images\n",
906
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels\n",
907
+ " which = np.flip(np.argsort(sim_half[trial-50]))[attempt]\n",
908
+ "\n",
909
+ " ax[trial, attempt+1].imshow(utils.torch_to_Image(unique_imgs_to_plot[which]))\n",
910
+ " ax[trial, attempt+1].set_title(f\"Top {attempt+1}\")\n",
911
+ " ax[trial, attempt+1].axis(\"off\")\n",
912
+ " fig.tight_layout()\n",
913
+ " # plt.savefig('figures/retrieval_top10')\n",
914
+ " plt.show()"
915
+ ]
916
+ },
917
+ {
918
+ "cell_type": "code",
919
+ "execution_count": 20,
920
+ "id": "05df72ce-3cdb-4ad2-a790-0835a41fb0f6",
921
+ "metadata": {
922
+ "tags": []
923
+ },
924
+ "outputs": [],
925
+ "source": [
926
+ "# similarity of each unique MST image to all others using CLIP image embeddings only (top-1 is guaranteed to be correct)\n",
927
+ "# uses last hidden layer (which may not match as well as the last layer to human semantic judgments)\n",
928
+ "if plot_all and compute_circular:\n",
929
+ " print(\"Given Brain embedding, find correct Image embedding\")\n",
930
+ " top_n = 10 # how many of the top n images to display\n",
931
+ " fig, ax = plt.subplots(nrows=len(all_unique_images), ncols=top_n+1, figsize=(top_n*2,len(all_unique_images)*2))\n",
932
+ " for trial in range(len(all_unique_images)):\n",
933
+ " ax[trial, 0].imshow(utils.torch_to_Image(all_unique_images[trial]))\n",
934
+ " ax[trial, 0].set_title(\"original\\nimage\")\n",
935
+ " ax[trial, 0].axis(\"off\")\n",
936
+ " for attempt in range(10):\n",
937
+ " if trial < 50:\n",
938
+ " if \"ses-0\" not in model_name:\n",
939
+ " sim_half_circular = fwd_sim_halves_circular[0]\n",
940
+ " unique_imgs_to_plot_circular = all_unique_images[:int(len(all_unique_images)/2)]\n",
941
+ " else:\n",
942
+ " sim_half_circular = fwd_sim_circular\n",
943
+ " unique_imgs_to_plot_circular = all_unique_images\n",
944
+ " which_circular = np.flip(np.argsort(sim_half_circular[trial]))[attempt]\n",
945
+ "\n",
946
+ " elif trial >= 50:\n",
947
+ " if \"ses-0\" not in model_name:\n",
948
+ " sim_halves_circular = fwd_sim_halves_circular[1]\n",
949
+ " unique_imgs_to_plot_circular = all_unique_images[int(len(all_unique_images)/2):]\n",
950
+ " else:\n",
951
+ " sim_halves_circular = all_fwd_sim_circular\n",
952
+ " unique_imgs_to_plot_circular = all_unique_images\n",
953
+ " which_circular = np.flip(np.argsort(sim_half_circular[trial-50]))[attempt]\n",
954
+ "\n",
955
+ " ax[trial, attempt+1].imshow(utils.torch_to_Image(unique_imgs_to_plot_circular[which_circular]))\n",
956
+ " ax[trial, attempt+1].set_title(f\"Top {attempt+1}\")\n",
957
+ " ax[trial, attempt+1].axis(\"off\")\n",
958
+ " fig.tight_layout()\n",
959
+ " # plt.savefig('figures/circular_top10')\n",
960
+ " plt.show()"
961
+ ]
962
+ },
963
+ {
964
+ "cell_type": "markdown",
965
+ "id": "0d404dec-5336-45cf-8932-833e895a9ebe",
966
+ "metadata": {
967
+ "tags": []
968
+ },
969
+ "source": [
970
+ "## MST Paired Retrieval (chance = 50%)"
971
+ ]
972
+ },
973
+ {
974
+ "cell_type": "code",
975
+ "execution_count": 21,
976
+ "id": "06667f7d-a356-4c30-8e1b-06ebd7ff1752",
977
+ "metadata": {},
978
+ "outputs": [],
979
+ "source": [
980
+ "if compute_circular:\n",
981
+ " all_top_sims_circular = [] # len(fwd_sim_halves[0]); for each image, contains the similarity to the top-n choices until it gets the correct answer. If len 1, top-1 is correct\n",
982
+ " all_pairmate_sims_circular = [] # len(fwd_sim_halves[0]); the similarity of each image to its pairmate \n",
983
+ " all_chose_lures_circular = [] # len(fwd_sim_halves[0]); True for each top-n choice if the lure was predicted to be more similar to the target \n",
984
+ "\n",
985
+ " if \"ses-0\" not in model_name:\n",
986
+ " first_half = True\n",
987
+ " sim_halves_circular = fwd_sim_halves_circular\n",
988
+ " sim_halves_circular = sim_halves_circular[0] if use_first_half else sim_halves_circular[1]\n",
989
+ " else:\n",
990
+ " sim_halves_circular = all_fwd_sim_circular\n",
991
+ " for i, img in enumerate(sim_halves_circular):\n",
992
+ " if i%2==0:\n",
993
+ " idx_to_pairmate = 1\n",
994
+ " elif i%2==1:\n",
995
+ " idx_to_pairmate = -1\n",
996
+ "\n",
997
+ " order_circular = img.argsort()[::-1]\n",
998
+ " # print(order)\n",
999
+ " top_sim_circular = []\n",
1000
+ " chose_lure_circular = []\n",
1001
+ " for idx in order_circular:\n",
1002
+ " sim_circular = img[idx]\n",
1003
+ " pairmate_sim_circular = img[i+idx_to_pairmate]\n",
1004
+ " top_sim_circular.append(sim_circular) \n",
1005
+ " chose_lure_circular.append((idx, sim_circular <= pairmate_sim_circular))\n",
1006
+ " # print(i, idx, img[idx], img[i+idx_to_pairmate])\n",
1007
+ " if idx == i:\n",
1008
+ " break\n",
1009
+ "\n",
1010
+ " all_top_sims_circular.append(top_sim_circular)\n",
1011
+ " all_pairmate_sims_circular.append(pairmate_sim_circular)\n",
1012
+ " all_chose_lures_circular.append(chose_lure_circular)\n",
1013
+ "\n",
1014
+ " bot_half = (all_pairmate_sims_circular < np.median(all_pairmate_sims_circular))[::2] # every other one since the sims are symmetric\n",
1015
+ " top_half = (all_pairmate_sims_circular > np.median(all_pairmate_sims_circular))[::2]\n",
1016
+ "\n",
1017
+ " binary_acc = []\n",
1018
+ " for i,(a,b) in enumerate(tqdm(MST_pairmate_indices,total=len(MST_pairmate_indices))):\n",
1019
+ " # print(i,a,b)\n",
1020
+ " with torch.no_grad():\n",
1021
+ " with torch.cuda.amp.autocast():\n",
1022
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1023
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1024
+ " emb_v = nn.functional.normalize(all_clipvoxels[[a]].flatten(1),dim=-1).to(device)\n",
1025
+ "\n",
1026
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1027
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1028
+ "\n",
1029
+ " binary_acc.append(a_sim > b_sim)\n",
1030
+ "\n",
1031
+ " with torch.no_grad():\n",
1032
+ " with torch.cuda.amp.autocast():\n",
1033
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1034
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1035
+ " emb_v = nn.functional.normalize(all_clipvoxels[[b]].flatten(1),dim=-1).to(device)\n",
1036
+ "\n",
1037
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1038
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1039
+ "\n",
1040
+ " binary_acc.append(a_sim < b_sim)\n",
1041
+ "\n",
1042
+ " assert len(binary_acc) == 50\n",
1043
+ " mst_score = np.mean(binary_acc)\n",
1044
+ " print(f\"session score: {np.mean(binary_acc):.4f} ± {np.std(binary_acc):.4f}\")\n"
1045
+ ]
1046
+ },
1047
+ {
1048
+ "cell_type": "code",
1049
+ "execution_count": 22,
1050
+ "id": "3468021c-660d-4205-8e5d-d75f6f3c2881",
1051
+ "metadata": {},
1052
+ "outputs": [],
1053
+ "source": [
1054
+ "# test = np.sort(MST_pairmate_indices, axis=1)\n",
1055
+ "# test"
1056
+ ]
1057
+ },
1058
+ {
1059
+ "cell_type": "code",
1060
+ "execution_count": 23,
1061
+ "id": "a032c4cc-4cf9-4b33-ac02-0e569f7757be",
1062
+ "metadata": {},
1063
+ "outputs": [],
1064
+ "source": [
1065
+ "# model_name"
1066
+ ]
1067
+ },
1068
+ {
1069
+ "cell_type": "code",
1070
+ "execution_count": 24,
1071
+ "id": "130bb4d9-b7f1-40a4-a3e2-7e3699c69c49",
1072
+ "metadata": {},
1073
+ "outputs": [],
1074
+ "source": [
1075
+ "# paul_all_images = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_images.pt\").to('cpu')\n",
1076
+ "# paul_all_clipvoxels = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_clipvoxels.pt\").to('cpu')\n",
1077
+ "# paul_all_recons = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_recons.pt\").to('cpu')\n",
1078
+ "# paul_all_prior_out = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_prior_out.pt\").to('cpu')\n",
1079
+ "# print(paul_all_images.shape, all_images.shape)\n",
1080
+ "# # assert torch.eq(paul_all_images, all_images)"
1081
+ ]
1082
+ },
1083
+ {
1084
+ "cell_type": "code",
1085
+ "execution_count": 25,
1086
+ "id": "698ffc2c-5653-49d3-9d16-38a32f9e7f6b",
1087
+ "metadata": {
1088
+ "tags": []
1089
+ },
1090
+ "outputs": [],
1091
+ "source": [
1092
+ "# print(paul_all_images.shape, all_images.shape)\n",
1093
+ "# torch.eq(paul_all_images, all_images)"
1094
+ ]
1095
+ },
1096
+ {
1097
+ "cell_type": "code",
1098
+ "execution_count": 26,
1099
+ "id": "23f26846-3170-4c91-a8e3-f98322e90dc0",
1100
+ "metadata": {
1101
+ "tags": []
1102
+ },
1103
+ "outputs": [
1104
+ {
1105
+ "name": "stdout",
1106
+ "output_type": "stream",
1107
+ "text": [
1108
+ "assuming single session\n"
1109
+ ]
1110
+ },
1111
+ {
1112
+ "name": "stderr",
1113
+ "output_type": "stream",
1114
+ "text": [
1115
+ "100%|██████████| 50/50 [00:04<00:00, 10.23it/s]"
1116
+ ]
1117
+ },
1118
+ {
1119
+ "name": "stdout",
1120
+ "output_type": "stream",
1121
+ "text": [
1122
+ "session score: 0.8900 ± 0.3129\n"
1123
+ ]
1124
+ },
1125
+ {
1126
+ "name": "stderr",
1127
+ "output_type": "stream",
1128
+ "text": [
1129
+ "\n"
1130
+ ]
1131
+ }
1132
+ ],
1133
+ "source": [
1134
+ "if \"MST\" in model_name:\n",
1135
+ " if \"ses-0\" not in model_name:\n",
1136
+ " print('assuming ses-02+ses-03 multisession model')\n",
1137
+ " mst_score = []\n",
1138
+ " for half in range(2):\n",
1139
+ " binary_acc = []\n",
1140
+ " if half==0:\n",
1141
+ " MST_pairmate_indices_half = MST_pairmate_indices[:int(len(MST_pairmate_indices)/2)]\n",
1142
+ " elif half==1:\n",
1143
+ " MST_pairmate_indices_half = MST_pairmate_indices[int(len(MST_pairmate_indices)/2):]\n",
1144
+ " for i,(a,b) in enumerate(tqdm(MST_pairmate_indices_half,total=len(MST_pairmate_indices_half))):\n",
1145
+ " # print(i,a,b)\n",
1146
+ " with torch.no_grad():\n",
1147
+ " with torch.cuda.amp.autocast():\n",
1148
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1149
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1150
+ " emb_v = nn.functional.normalize(all_clipvoxels[[a]].flatten(1),dim=-1).to(device)\n",
1151
+ "\n",
1152
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1153
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1154
+ "\n",
1155
+ " binary_acc.append(a_sim > b_sim)\n",
1156
+ "\n",
1157
+ " with torch.no_grad():\n",
1158
+ " with torch.cuda.amp.autocast():\n",
1159
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1160
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1161
+ " emb_v = nn.functional.normalize(all_clipvoxels[[b]].flatten(1),dim=-1).to(device)\n",
1162
+ "\n",
1163
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1164
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1165
+ "\n",
1166
+ " binary_acc.append(a_sim < b_sim)\n",
1167
+ "\n",
1168
+ " assert len(binary_acc) == 50 # don't want to average across both sessions; make sure it resets\n",
1169
+ " print(f\"ses-0{half+2} score: {np.mean(binary_acc):.4f} ± {np.std(binary_acc):.4f}\")\n",
1170
+ " mst_score.append((np.mean(binary_acc),np.std(binary_acc)))\n",
1171
+ "\n",
1172
+ " # print(mst_score)\n",
1173
+ " else:\n",
1174
+ " print('assuming single session')\n",
1175
+ " binary_acc = []\n",
1176
+ " for i,(a,b) in enumerate(tqdm(MST_pairmate_indices,total=len(MST_pairmate_indices))):\n",
1177
+ " # print(i,a,b)\n",
1178
+ " with torch.no_grad():\n",
1179
+ " with torch.cuda.amp.autocast():\n",
1180
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1181
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1182
+ " emb_v = nn.functional.normalize(all_clipvoxels[[a]].flatten(1),dim=-1).to(device)\n",
1183
+ "\n",
1184
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1185
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1186
+ "\n",
1187
+ " binary_acc.append(a_sim > b_sim)\n",
1188
+ "\n",
1189
+ " with torch.no_grad():\n",
1190
+ " with torch.cuda.amp.autocast():\n",
1191
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1192
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1193
+ " emb_v = nn.functional.normalize(all_clipvoxels[[b]].flatten(1),dim=-1).to(device)\n",
1194
+ "\n",
1195
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1196
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1197
+ "\n",
1198
+ " binary_acc.append(a_sim < b_sim)\n",
1199
+ " \n",
1200
+ " # assert len(binary_acc) == 50\n",
1201
+ " mst_score = np.mean(binary_acc)\n",
1202
+ " print(f\"session score: {np.mean(binary_acc):.4f} ± {np.std(binary_acc):.4f}\")\n"
1203
+ ]
1204
+ },
1205
+ {
1206
+ "cell_type": "markdown",
1207
+ "id": "0a26e124-2444-434d-a399-d03c2c90cc08",
1208
+ "metadata": {},
1209
+ "source": [
1210
+ "## 2-way identification"
1211
+ ]
1212
+ },
1213
+ {
1214
+ "cell_type": "code",
1215
+ "execution_count": 27,
1216
+ "id": "3e1778ff-5d6a-4087-b59f-0f44b9e0eada",
1217
+ "metadata": {},
1218
+ "outputs": [],
1219
+ "source": [
1220
+ "from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names\n",
1221
+ "\n",
1222
+ "@torch.no_grad()\n",
1223
+ "def two_way_identification(all_recons, all_images, model, preprocess, feature_layer=None, return_avg=True):\n",
1224
+ " preds = model(torch.stack([preprocess(recon) for recon in all_recons], dim=0).to(device))\n",
1225
+ " reals = model(torch.stack([preprocess(indiv) for indiv in all_images], dim=0).to(device))\n",
1226
+ " if feature_layer is None:\n",
1227
+ " preds = preds.float().flatten(1).cpu().numpy()\n",
1228
+ " reals = reals.float().flatten(1).cpu().numpy()\n",
1229
+ " else:\n",
1230
+ " preds = preds[feature_layer].float().flatten(1).cpu().numpy()\n",
1231
+ " reals = reals[feature_layer].float().flatten(1).cpu().numpy()\n",
1232
+ "\n",
1233
+ " r = np.corrcoef(reals, preds)\n",
1234
+ " r = r[:len(all_images), len(all_images):]\n",
1235
+ " congruents = np.diag(r)\n",
1236
+ "\n",
1237
+ " success = r < congruents\n",
1238
+ " success_cnt = np.sum(success, 0)\n",
1239
+ "\n",
1240
+ " if return_avg:\n",
1241
+ " perf = np.mean(success_cnt) / (len(all_images)-1)\n",
1242
+ " return perf\n",
1243
+ " else:\n",
1244
+ " return success_cnt, len(all_images)-1\n",
1245
+ " \n",
1246
+ "all_recons=all_recons.to(device)\n",
1247
+ "all_images=all_images.to(device)"
1248
+ ]
1249
+ },
1250
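two_way_identification above works off the full correlation matrix between ground-truth features and reconstruction features: for each reconstruction it counts how many of the other ground-truth images correlate with it less strongly than its own ground-truth image does, then divides by the number of distractors so that chance performance is 0.5. A tiny self-contained NumPy illustration of that counting logic (toy feature vectors, not the notebook's model activations):

import numpy as np

rng = np.random.default_rng(0)
reals = rng.standard_normal((4, 16))                  # 4 "ground truth" feature vectors
preds = reals + 0.5 * rng.standard_normal((4, 16))    # 4 noisy "reconstruction" features

r = np.corrcoef(reals, preds)[:4, 4:]   # r[i, j] = corr(real_i, pred_j)
congruents = np.diag(r)                 # corr(real_j, pred_j)
success = r < congruents                # distractor real_i matches pred_j worse than the true real_j
success_cnt = np.sum(success, 0)        # per reconstruction: number of distractor reals it beats
perf = success_cnt.mean() / (len(reals) - 1)
print(perf)                             # close to 1.0 here because the noise is small; chance is 0.5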
+ {
1251
+ "cell_type": "markdown",
1252
+ "id": "df6be966-52ef-4cf6-8078-8d2d9617564b",
1253
+ "metadata": {},
1254
+ "source": [
1255
+ "## PixCorr"
1256
+ ]
1257
+ },
1258
+ {
1259
+ "cell_type": "code",
1260
+ "execution_count": 28,
1261
+ "id": "2e17ea38-a254-4e90-a910-711734fdd8eb",
1262
+ "metadata": {},
1263
+ "outputs": [
1264
+ {
1265
+ "name": "stderr",
1266
+ "output_type": "stream",
1267
+ "text": [
1268
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
1269
+ " warnings.warn(\n"
1270
+ ]
1271
+ },
1272
+ {
1273
+ "name": "stdout",
1274
+ "output_type": "stream",
1275
+ "text": [
1276
+ "torch.Size([100, 541875])\n",
1277
+ "torch.Size([100, 541875])\n"
1278
+ ]
1279
+ },
1280
+ {
1281
+ "name": "stderr",
1282
+ "output_type": "stream",
1283
+ "text": [
1284
+ "100%|██████████| 100/100 [00:00<00:00, 775.96it/s]"
1285
+ ]
1286
+ },
1287
+ {
1288
+ "name": "stdout",
1289
+ "output_type": "stream",
1290
+ "text": [
1291
+ "0.19027211547192407\n"
1292
+ ]
1293
+ },
1294
+ {
1295
+ "name": "stderr",
1296
+ "output_type": "stream",
1297
+ "text": [
1298
+ "\n"
1299
+ ]
1300
+ }
1301
+ ],
1302
+ "source": [
1303
+ "preprocess = transforms.Compose([\n",
1304
+ " transforms.Resize(425, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1305
+ "])\n",
1306
+ "\n",
1307
+ "# Flatten images while keeping the batch dimension\n",
1308
+ "all_images_flattened = preprocess(all_images).reshape(len(all_images), -1).cpu()\n",
1309
+ "all_recons_flattened = preprocess(all_recons).view(len(all_recons), -1).cpu()\n",
1310
+ "\n",
1311
+ "print(all_images_flattened.shape)\n",
1312
+ "print(all_recons_flattened.shape)\n",
1313
+ "\n",
1314
+ "corr_stack = []\n",
1315
+ "\n",
1316
+ "corrsum = 0\n",
1317
+ "for i in tqdm(range(len(all_images))):\n",
1318
+ " corrcoef = np.corrcoef(all_images_flattened[i], all_recons_flattened[i])[0][1]\n",
1319
+ " if np.isnan(corrcoef):\n",
1320
+ " print(\"WARNING: CORRCOEF WAS NAN\")\n",
1321
+ " corrcoef = 0\n",
1322
+ " corrsum += corrcoef\n",
1323
+ " corr_stack.append(corrcoef)\n",
1324
+ "corrmean = corrsum / len(all_images)\n",
1325
+ "\n",
1326
+ "pixcorr = corrmean\n",
1327
+ "print(pixcorr)"
1328
+ ]
1329
+ },
1330
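PixCorr above loops over images, computing the Pearson correlation between each flattened ground-truth image and its flattened reconstruction, then averages. The same number can be computed without the Python loop; a hedged vectorized equivalent over the already-flattened CPU tensors from the PixCorr cell (same variable names):

import torch

def pixcorr_vectorized(images_flat, recons_flat, eps=1e-8):
    # Per-row Pearson correlation between flattened images and reconstructions.
    x = images_flat - images_flat.mean(dim=1, keepdim=True)
    y = recons_flat - recons_flat.mean(dim=1, keepdim=True)
    r = (x * y).sum(dim=1) / (x.norm(dim=1) * y.norm(dim=1) + eps)
    return r.mean().item()

# pixcorr_vectorized(all_images_flattened, all_recons_flattened) should match the looped value above.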
+ {
1331
+ "cell_type": "code",
1332
+ "execution_count": 29,
1333
+ "id": "7e2cd891-db44-475d-a8c6-ab345eaa58f8",
1334
+ "metadata": {},
1335
+ "outputs": [],
1336
+ "source": [
1337
+ "# print(all_images.shape)\n",
1338
+ "# print(all_images_flattened.shape)\n",
1339
+ "# print(all_recons.shape)\n",
1340
+ "# print(all_recons_flattened.shape)\n",
1341
+ "# len(all_images)"
1342
+ ]
1343
+ },
1344
+ {
1345
+ "cell_type": "markdown",
1346
+ "id": "7a556d5b-33a2-44aa-b48d-4b168316bbdd",
1347
+ "metadata": {
1348
+ "tags": []
1349
+ },
1350
+ "source": [
1351
+ "## SSIM"
1352
+ ]
1353
+ },
1354
+ {
1355
+ "cell_type": "code",
1356
+ "execution_count": 30,
1357
+ "id": "2326fc4c-1248-4d0f-9176-218c6460f285",
1358
+ "metadata": {},
1359
+ "outputs": [
1360
+ {
1361
+ "name": "stderr",
1362
+ "output_type": "stream",
1363
+ "text": [
1364
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
1365
+ " warnings.warn(\n"
1366
+ ]
1367
+ },
1368
+ {
1369
+ "name": "stdout",
1370
+ "output_type": "stream",
1371
+ "text": [
1372
+ "converted, now calculating ssim...\n"
1373
+ ]
1374
+ },
1375
+ {
1376
+ "name": "stderr",
1377
+ "output_type": "stream",
1378
+ "text": [
1379
+ "100%|██████████| 100/100 [00:00<00:00, 120.46it/s]"
1380
+ ]
1381
+ },
1382
+ {
1383
+ "name": "stdout",
1384
+ "output_type": "stream",
1385
+ "text": [
1386
+ "0.3447461015516486\n"
1387
+ ]
1388
+ },
1389
+ {
1390
+ "name": "stderr",
1391
+ "output_type": "stream",
1392
+ "text": [
1393
+ "\n"
1394
+ ]
1395
+ }
1396
+ ],
1397
+ "source": [
1398
+ "# see https://github.com/zijin-gu/meshconv-decoding/issues/3\n",
1399
+ "from skimage.color import rgb2gray\n",
1400
+ "from skimage.metrics import structural_similarity as ssim\n",
1401
+ "\n",
1402
+ "preprocess = transforms.Compose([\n",
1403
+ " transforms.Resize(425, interpolation=transforms.InterpolationMode.BILINEAR), \n",
1404
+ "])\n",
1405
+ "\n",
1406
+ "# convert image to grayscale with rgb2grey\n",
1407
+ "img_gray = rgb2gray(preprocess(all_images).permute((0,2,3,1)).cpu())\n",
1408
+ "recon_gray = rgb2gray(preprocess(all_recons).permute((0,2,3,1)).cpu())\n",
1409
+ "print(\"converted, now calculating ssim...\")\n",
1410
+ "\n",
1411
+ "ssim_score=[]\n",
1412
+ "for im,rec in tqdm(zip(img_gray,recon_gray),total=len(all_images)):\n",
1413
+ " ssim_score.append(ssim(rec, im, multichannel=True, gaussian_weights=True, sigma=1.5, use_sample_covariance=False, data_range=1.0))\n",
1414
+ "\n",
1415
+ "ssim = np.mean(ssim_score)\n",
1416
+ "print(ssim)"
1417
+ ]
1418
+ },
1419
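Two small caveats about the SSIM cell: `ssim = np.mean(ssim_score)` rebinds the name of the imported structural_similarity alias, so the metric function cannot be called again later in the session, and `multichannel=True` is passed even though rgb2gray already produced single-channel images (newer scikit-image releases deprecate that flag in favour of channel_axis). A hedged variant that keeps the alias intact and drops the unused flag; img_gray and recon_gray are the arrays from the cell above:

import numpy as np
from skimage.metrics import structural_similarity

ssim_scores = []
for im, rec in zip(img_gray, recon_gray):
    ssim_scores.append(structural_similarity(
        rec, im,
        gaussian_weights=True, sigma=1.5,
        use_sample_covariance=False, data_range=1.0,
    ))
ssim_mean = float(np.mean(ssim_scores))  # distinct name instead of shadowing the ssim function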
+ {
1420
+ "cell_type": "markdown",
1421
+ "id": "35138520-ec00-48a6-90dc-249a32a783d2",
1422
+ "metadata": {},
1423
+ "source": [
1424
+ "## AlexNet"
1425
+ ]
1426
+ },
1427
+ {
1428
+ "cell_type": "code",
1429
+ "execution_count": 31,
1430
+ "id": "3b45cc6c-ab80-43e2-b446-c8fcb4fc54e4",
1431
+ "metadata": {},
1432
+ "outputs": [
1433
+ {
1434
+ "name": "stderr",
1435
+ "output_type": "stream",
1436
+ "text": [
1437
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/overrides.py:110: UserWarning: 'has_cuda' is deprecated, please use 'torch.backends.cuda.is_built()'\n",
1438
+ " torch.has_cuda,\n",
1439
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/overrides.py:111: UserWarning: 'has_cudnn' is deprecated, please use 'torch.backends.cudnn.is_available()'\n",
1440
+ " torch.has_cudnn,\n",
1441
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/overrides.py:117: UserWarning: 'has_mps' is deprecated, please use 'torch.backends.mps.is_built()'\n",
1442
+ " torch.has_mps,\n",
1443
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/overrides.py:118: UserWarning: 'has_mkldnn' is deprecated, please use 'torch.backends.mkldnn.is_available()'\n",
1444
+ " torch.has_mkldnn,\n"
1445
+ ]
1446
+ },
1447
+ {
1448
+ "name": "stdout",
1449
+ "output_type": "stream",
1450
+ "text": [
1451
+ "\n",
1452
+ "---early, AlexNet(2)---\n",
1453
+ "2-way Percent Correct: 0.8010\n",
1454
+ "\n",
1455
+ "---mid, AlexNet(5)---\n",
1456
+ "2-way Percent Correct: 0.8542\n"
1457
+ ]
1458
+ }
1459
+ ],
1460
+ "source": [
1461
+ "from torchvision.models import alexnet, AlexNet_Weights\n",
1462
+ "alex_weights = AlexNet_Weights.IMAGENET1K_V1\n",
1463
+ "\n",
1464
+ "alex_model = create_feature_extractor(alexnet(weights=alex_weights), return_nodes=['features.4','features.11']).to(device)\n",
1465
+ "alex_model.eval().requires_grad_(False).to(device)\n",
1466
+ "\n",
1467
+ "# see alex_weights.transforms()\n",
1468
+ "preprocess = transforms.Compose([\n",
1469
+ " transforms.Resize(256, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1470
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1471
+ " std=[0.229, 0.224, 0.225]),\n",
1472
+ "])\n",
1473
+ "\n",
1474
+ "layer = 'early, AlexNet(2)'\n",
1475
+ "print(f\"\\n---{layer}---\")\n",
1476
+ "all_per_correct = two_way_identification(all_recons, all_images, \n",
1477
+ " alex_model, preprocess, 'features.4')\n",
1478
+ "alexnet2 = np.mean(all_per_correct)\n",
1479
+ "print(f\"2-way Percent Correct: {alexnet2:.4f}\")\n",
1480
+ "\n",
1481
+ "layer = 'mid, AlexNet(5)'\n",
1482
+ "print(f\"\\n---{layer}---\")\n",
1483
+ "all_per_correct = two_way_identification(all_recons, all_images, \n",
1484
+ " alex_model, preprocess, 'features.11')\n",
1485
+ "alexnet5 = np.mean(all_per_correct)\n",
1486
+ "print(f\"2-way Percent Correct: {alexnet5:.4f}\")"
1487
+ ]
1488
+ },
1489
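The AlexNet cell hard-codes the node names 'features.4' and 'features.11' for the "early" and "mid" comparisons. get_graph_node_names, imported earlier alongside create_feature_extractor, lists every valid node name for a model, which is the easiest way to check which layers those indices correspond to (in torchvision's AlexNet they are the ReLU outputs after the second and fifth conv layers). A short sketch:

from torchvision.models import alexnet, AlexNet_Weights
from torchvision.models.feature_extraction import get_graph_node_names

train_nodes, eval_nodes = get_graph_node_names(alexnet(weights=AlexNet_Weights.IMAGENET1K_V1))
print([n for n in eval_nodes if n.startswith("features")])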
+ {
1490
+ "cell_type": "markdown",
1491
+ "id": "c296bab2-d106-469e-b997-b32d21a2cf01",
1492
+ "metadata": {},
1493
+ "source": [
1494
+ "## InceptionV3"
1495
+ ]
1496
+ },
1497
+ {
1498
+ "cell_type": "code",
1499
+ "execution_count": 32,
1500
+ "id": "5a9c1b2b-af2a-476d-a1ac-32ee915ac2ec",
1501
+ "metadata": {},
1502
+ "outputs": [
1503
+ {
1504
+ "name": "stderr",
1505
+ "output_type": "stream",
1506
+ "text": [
1507
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/models/feature_extraction.py:174: UserWarning: NOTE: The nodes obtained by tracing the model in eval mode are a subsequence of those obtained in train mode. When choosing nodes for feature extraction, you may need to specify output nodes for train and eval mode separately.\n",
1508
+ " warnings.warn(msg + suggestion_msg)\n"
1509
+ ]
1510
+ },
1511
+ {
1512
+ "name": "stdout",
1513
+ "output_type": "stream",
1514
+ "text": [
1515
+ "2-way Percent Correct: 0.7205\n"
1516
+ ]
1517
+ }
1518
+ ],
1519
+ "source": [
1520
+ "from torchvision.models import inception_v3, Inception_V3_Weights\n",
1521
+ "weights = Inception_V3_Weights.DEFAULT\n",
1522
+ "inception_model = create_feature_extractor(inception_v3(weights=weights), \n",
1523
+ " return_nodes=['avgpool']).to(device)\n",
1524
+ "inception_model.eval().requires_grad_(False).to(device)\n",
1525
+ "\n",
1526
+ "# see weights.transforms()\n",
1527
+ "preprocess = transforms.Compose([\n",
1528
+ " transforms.Resize(342, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1529
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1530
+ " std=[0.229, 0.224, 0.225]),\n",
1531
+ "])\n",
1532
+ "\n",
1533
+ "all_per_correct = two_way_identification(all_recons, all_images,\n",
1534
+ " inception_model, preprocess, 'avgpool')\n",
1535
+ " \n",
1536
+ "inception = np.mean(all_per_correct)\n",
1537
+ "print(f\"2-way Percent Correct: {inception:.4f}\")"
1538
+ ]
1539
+ },
1540
+ {
1541
+ "cell_type": "markdown",
1542
+ "id": "d7a25f7f-8298-4413-b512-8a1173413e07",
1543
+ "metadata": {},
1544
+ "source": [
1545
+ "## CLIP"
1546
+ ]
1547
+ },
1548
+ {
1549
+ "cell_type": "code",
1550
+ "execution_count": 33,
1551
+ "id": "6afbf7ce-8793-4988-a328-a632acd88aa9",
1552
+ "metadata": {},
1553
+ "outputs": [
1554
+ {
1555
+ "name": "stdout",
1556
+ "output_type": "stream",
1557
+ "text": [
1558
+ "2-way Percent Correct: 0.7387\n"
1559
+ ]
1560
+ }
1561
+ ],
1562
+ "source": [
1563
+ "import clip\n",
1564
+ "clip_model, preprocess = clip.load(\"ViT-L/14\", device=device)\n",
1565
+ "\n",
1566
+ "preprocess = transforms.Compose([\n",
1567
+ " transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1568
+ " transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n",
1569
+ " std=[0.26862954, 0.26130258, 0.27577711]),\n",
1570
+ "])\n",
1571
+ "\n",
1572
+ "all_per_correct = two_way_identification(all_recons, all_images,\n",
1573
+ " clip_model.encode_image, preprocess, None) # final layer\n",
1574
+ "clip_ = np.mean(all_per_correct)\n",
1575
+ "print(f\"2-way Percent Correct: {clip_:.4f}\")"
1576
+ ]
1577
+ },
1578
+ {
1579
+ "cell_type": "markdown",
1580
+ "id": "e4fed9f8-ef1a-4c6d-a83f-2a934b6e87fd",
1581
+ "metadata": {},
1582
+ "source": [
1583
+ "## Efficient Net"
1584
+ ]
1585
+ },
1586
+ {
1587
+ "cell_type": "code",
1588
+ "execution_count": 34,
1589
+ "id": "14143c0f-1b32-43ef-98d8-8ed458df4551",
1590
+ "metadata": {},
1591
+ "outputs": [
1592
+ {
1593
+ "name": "stdout",
1594
+ "output_type": "stream",
1595
+ "text": [
1596
+ "Distance: 0.8597363629795817\n"
1597
+ ]
1598
+ }
1599
+ ],
1600
+ "source": [
1601
+ "import scipy as sp\n",
1602
+ "from torchvision.models import efficientnet_b1, EfficientNet_B1_Weights\n",
1603
+ "weights = EfficientNet_B1_Weights.DEFAULT\n",
1604
+ "eff_model = create_feature_extractor(efficientnet_b1(weights=weights), \n",
1605
+ " return_nodes=['avgpool'])\n",
1606
+ "eff_model.eval().requires_grad_(False).to(device)\n",
1607
+ "\n",
1608
+ "# see weights.transforms()\n",
1609
+ "preprocess = transforms.Compose([\n",
1610
+ " transforms.Resize(255, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1611
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1612
+ " std=[0.229, 0.224, 0.225]),\n",
1613
+ "])\n",
1614
+ "\n",
1615
+ "gt = eff_model(preprocess(all_images))['avgpool']\n",
1616
+ "gt = gt.reshape(len(gt),-1).cpu().numpy()\n",
1617
+ "fake = eff_model(preprocess(all_recons))['avgpool']\n",
1618
+ "fake = fake.reshape(len(fake),-1).cpu().numpy()\n",
1619
+ "\n",
1620
+ "effnet_nomean = np.array([sp.spatial.distance.correlation(gt[i],fake[i]) for i in range(len(gt))])\n",
1621
+ "effnet = effnet_nomean.mean()\n",
1622
+ "print(\"Distance:\",effnet)"
1623
+ ]
1624
+ },
1625
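The EfficientNet-B1 (and SwAV, below) scores use scipy's correlation distance between pooled feature vectors, which is 1 minus the Pearson correlation of the mean-centered vectors, so lower is better for these two metrics. A quick check of that equivalence on toy vectors:

import numpy as np
from scipy.spatial.distance import correlation

rng = np.random.default_rng(0)
u = rng.standard_normal(10)
v = u + 0.1 * rng.standard_normal(10)

assert np.isclose(correlation(u, v), 1 - np.corrcoef(u, v)[0, 1])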
+ {
1626
+ "cell_type": "markdown",
1627
+ "id": "405f669d-cab7-4c75-90cd-651283f65a9e",
1628
+ "metadata": {},
1629
+ "source": [
1630
+ "## SwAV"
1631
+ ]
1632
+ },
1633
+ {
1634
+ "cell_type": "code",
1635
+ "execution_count": 35,
1636
+ "id": "4c60b0c4-79fe-4cff-95e9-99733c821e67",
1637
+ "metadata": {},
1638
+ "outputs": [
1639
+ {
1640
+ "name": "stderr",
1641
+ "output_type": "stream",
1642
+ "text": [
1643
+ "Using cache found in /home/ri4541/.cache/torch/hub/facebookresearch_swav_main\n",
1644
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
1645
+ " warnings.warn(\n",
1646
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=None`.\n",
1647
+ " warnings.warn(msg)\n"
1648
+ ]
1649
+ },
1650
+ {
1651
+ "name": "stdout",
1652
+ "output_type": "stream",
1653
+ "text": [
1654
+ "Distance: 0.5120611452768613\n"
1655
+ ]
1656
+ }
1657
+ ],
1658
+ "source": [
1659
+ "swav_model = torch.hub.load('facebookresearch/swav:main', 'resnet50')\n",
1660
+ "swav_model = create_feature_extractor(swav_model, \n",
1661
+ " return_nodes=['avgpool'])\n",
1662
+ "swav_model.eval().requires_grad_(False).to(device)\n",
1663
+ "\n",
1664
+ "preprocess = transforms.Compose([\n",
1665
+ " transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1666
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1667
+ " std=[0.229, 0.224, 0.225]),\n",
1668
+ "])\n",
1669
+ "\n",
1670
+ "gt = swav_model(preprocess(all_images))['avgpool']\n",
1671
+ "gt = gt.reshape(len(gt),-1).cpu().numpy()\n",
1672
+ "fake = swav_model(preprocess(all_recons))['avgpool']\n",
1673
+ "fake = fake.reshape(len(fake),-1).cpu().numpy()\n",
1674
+ "\n",
1675
+ "swav_nomean = np.array([sp.spatial.distance.correlation(gt[i],fake[i]) for i in range(len(gt))])\n",
1676
+ "swav = swav_nomean.mean()\n",
1677
+ "print(\"Distance:\",swav)"
1678
+ ]
1679
+ },
1680
+ {
1681
+ "cell_type": "code",
1682
+ "execution_count": 36,
1683
+ "id": "8ebeeff0-49ab-4bf5-9e12-dd45eb7ff71f",
1684
+ "metadata": {
1685
+ "tags": []
1686
+ },
1687
+ "outputs": [
1688
+ {
1689
+ "name": "stdout",
1690
+ "output_type": "stream",
1691
+ "text": [
1692
+ " 0\n",
1693
+ " metrics\n",
1694
+ " 0.80101\n",
1695
+ " 0.854242\n",
1696
+ " 0.720505\n",
1697
+ " 0.738687\n",
1698
+ " 0.859736\n",
1699
+ " 0.512061\n",
1700
+ " 0.71\n",
1701
+ " 0.7\n",
1702
+ " 0.89\n"
1703
+ ]
1704
+ },
1705
+ {
1706
+ "ename": "FileNotFoundError",
1707
+ "evalue": "[Errno 2] No such file or directory: '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0/final_evals'",
1708
+ "output_type": "error",
1709
+ "traceback": [
1710
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
1711
+ "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
1712
+ "Cell \u001b[0;32mIn[36], line 13\u001b[0m\n\u001b[1;32m 11\u001b[0m final_evals_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00meval_dir\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m/final_evals\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m saving:\n\u001b[0;32m---> 13\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfinal_evals_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mw\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 14\u001b[0m f\u001b[38;5;241m.\u001b[39mwrite(df)\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msaved final evals!\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
1713
+ "File \u001b[0;32m~/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/IPython/core/interactiveshell.py:324\u001b[0m, in \u001b[0;36m_modified_open\u001b[0;34m(file, *args, **kwargs)\u001b[0m\n\u001b[1;32m 317\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m file \u001b[38;5;129;01min\u001b[39;00m {\u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m}:\n\u001b[1;32m 318\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 319\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mIPython won\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt let you open fd=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfile\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m by default \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 320\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mas it is likely to crash IPython. If you know what you are doing, \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 321\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myou can use builtins\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m open.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 322\u001b[0m )\n\u001b[0;32m--> 324\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mio_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
1714
+ "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_ses-01_bs24_MST_paul_MSTsplit_random_seed_0/final_evals'"
1715
+ ]
1716
+ }
1717
+ ],
1718
+ "source": [
1719
+ "#[pixcorr, ssim, alexnet2, alexnet5, inception, clip_, effnet, swav, percent_correct_fwd, percent_correct_bwd]\n",
1720
+ "import pandas as pd\n",
1721
+ "# pd.options.display.float_format = '{:.2%}'.format\n",
1722
+ "# pd.reset_option('all')\n",
1723
+ "if \"ses-0\" not in model_name:\n",
1724
+ " df = pd.DataFrame([\"metrics\", alexnet2, alexnet5, inception, clip_, effnet, swav, fwd_acc, bwd_acc, mst_score]).to_string(index=False)\n",
1725
+ "else:\n",
1726
+ " df = pd.DataFrame([\"metrics\", alexnet2, alexnet5, inception, clip_, effnet, swav, all_fwd_acc[0], all_bwd_acc[0], mst_score]).to_string(index=False)\n",
1727
+ "print(df)\n",
1728
+ "# print(model_name_plus_suffix)\n",
1729
+ "final_evals_path = f\"{eval_dir}/final_evals\"\n",
1730
+ "if saving:\n",
1731
+ " with open(final_evals_path, 'w') as f:\n",
1732
+ " f.write(df)\n",
1733
+ " print('saved final evals!')"
1734
+ ]
1735
+ },
1736
+ {
1737
+ "cell_type": "code",
1738
+ "execution_count": null,
1739
+ "id": "0446fb2a-fd3f-451f-b9b2-38aa95a2be8d",
1740
+ "metadata": {},
1741
+ "outputs": [],
1742
+ "source": [
1743
+ "with open(final_evals_path, 'r') as f:\n",
1744
+ " for line in f:\n",
1745
+ " print(line, end='')\n"
1746
+ ]
1747
+ },
1748
+ {
1749
+ "cell_type": "code",
1750
+ "execution_count": null,
1751
+ "id": "fd74e27c-6d79-4d49-bcc9-f6b85fadc752",
1752
+ "metadata": {},
1753
+ "outputs": [],
1754
+ "source": []
1755
+ }
1756
+ ],
1757
+ "metadata": {
1758
+ "kernelspec": {
1759
+ "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]",
1760
+ "language": "python",
1761
+ "name": "conda_rt_mindeye2"
1762
+ },
1763
+ "language_info": {
1764
+ "codemirror_mode": {
1765
+ "name": "ipython",
1766
+ "version": 3
1767
+ },
1768
+ "file_extension": ".py",
1769
+ "mimetype": "text/x-python",
1770
+ "name": "python",
1771
+ "nbconvert_exporter": "python",
1772
+ "pygments_lexer": "ipython3",
1773
+ "version": "3.11.7"
1774
+ }
1775
+ },
1776
+ "nbformat": 4,
1777
+ "nbformat_minor": 5
1778
+ }
final_evaluations_sdxl_turbo.ipynb ADDED
@@ -0,0 +1,1837 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 39,
6
+ "id": "7d5f265e-407a-40bd-92fb-a652091fd7ea",
7
+ "metadata": {
8
+ "tags": []
9
+ },
10
+ "outputs": [
11
+ {
12
+ "name": "stdout",
13
+ "output_type": "stream",
14
+ "text": [
15
+ "importing modules\n",
16
+ "LOCAL RANK 0\n",
17
+ "PID of this process = 73671\n",
18
+ "device: cuda\n",
19
+ "Distributed environment: DistributedType.NO\n",
20
+ "Num processes: 1\n",
21
+ "Process index: 0\n",
22
+ "Local process index: 0\n",
23
+ "Device: cuda\n",
24
+ "\n",
25
+ "Mixed precision type: fp16\n",
26
+ "\n",
27
+ "distributed = False num_devices = 1 local rank = 0 world size = 1\n"
28
+ ]
29
+ }
30
+ ],
31
+ "source": [
32
+ "print('importing modules')\n",
33
+ "import os\n",
34
+ "import sys\n",
35
+ "import json\n",
36
+ "import argparse\n",
37
+ "import numpy as np\n",
38
+ "import math\n",
39
+ "from einops import rearrange\n",
40
+ "import time\n",
41
+ "import random\n",
42
+ "import string\n",
43
+ "import h5py\n",
44
+ "from tqdm import tqdm\n",
45
+ "import webdataset as wds\n",
46
+ "\n",
47
+ "import matplotlib.pyplot as plt\n",
48
+ "import torch\n",
49
+ "import torch.nn as nn\n",
50
+ "from torchvision import transforms\n",
51
+ "from accelerate import Accelerator, DeepSpeedPlugin\n",
52
+ "\n",
53
+ "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder\n",
54
+ "from models import GNet8_Encoder\n",
55
+ "\n",
56
+ "# tf32 data type is faster than standard float32\n",
57
+ "torch.backends.cuda.matmul.allow_tf32 = True\n",
58
+ "\n",
59
+ "# custom functions #\n",
60
+ "import utils\n",
61
+ "\n",
62
+ "### Multi-GPU config ###\n",
63
+ "local_rank = os.getenv('RANK')\n",
64
+ "if local_rank is None: \n",
65
+ " local_rank = 0\n",
66
+ "else:\n",
67
+ " local_rank = int(local_rank)\n",
68
+ "print(\"LOCAL RANK \", local_rank) \n",
69
+ "\n",
70
+ "accelerator = Accelerator(split_batches=False, mixed_precision=\"fp16\") # ['no', 'fp8', 'fp16', 'bf16']\n",
71
+ "\n",
72
+ "print(\"PID of this process =\",os.getpid())\n",
73
+ "device = accelerator.device\n",
74
+ "print(\"device:\",device)\n",
75
+ "world_size = accelerator.state.num_processes\n",
76
+ "distributed = not accelerator.state.distributed_type == 'NO'\n",
77
+ "num_devices = torch.cuda.device_count()\n",
78
+ "if num_devices==0 or not distributed: num_devices = 1\n",
79
+ "num_workers = num_devices\n",
80
+ "print(accelerator.state)\n",
81
+ "\n",
82
+ "print(\"distributed =\",distributed, \"num_devices =\", num_devices, \"local rank =\", local_rank, \"world size =\", world_size)\n",
83
+ "print = accelerator.print # only print if local_rank=0"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "code",
88
+ "execution_count": 40,
89
+ "id": "1b8e8d9e-2931-4546-a2ce-a7417bbe21f4",
90
+ "metadata": {
91
+ "tags": []
92
+ },
93
+ "outputs": [
94
+ {
95
+ "name": "stdout",
96
+ "output_type": "stream",
97
+ "text": [
98
+ "<utils.CLIPEncoder object at 0x7f56edac0710>\n"
99
+ ]
100
+ }
101
+ ],
102
+ "source": [
103
+ "# Load embedding model (last hidden layer)\n",
104
+ "# try:\n",
105
+ "# print(clip_img_embedder)\n",
106
+ "# except:\n",
107
+ "# clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n",
108
+ "# arch=\"ViT-bigG-14\",\n",
109
+ "# version=\"laion2b_s39b_b160k\",\n",
110
+ "# output_tokens=True,\n",
111
+ "# only_tokens=True,\n",
112
+ "# )\n",
113
+ "# clip_img_embedder.to(device)\n",
114
+ "# clip_seq_dim = 256\n",
115
+ "# clip_emb_dim = 1664\n",
116
+ "\n",
117
+ "from utils import CLIPEncoder\n",
118
+ "\n",
119
+ "try:\n",
120
+ " print(clip_img_embedder)\n",
121
+ "except:\n",
122
+ " clip_img_embedder = CLIPEncoder(\n",
123
+ " model_name=\"ViT-H-14\",\n",
124
+ " pretrained=\"laion2b_s32b_b79k\",\n",
125
+ " precision=\"fp32\",\n",
126
+ " batch_size=20,\n",
127
+ " device=\"cuda\"\n",
128
+ " )\n",
129
+ "\n",
130
+ "clip_seq_dim = 1\n",
131
+ "clip_emb_dim = 1024\n",
132
+ "\n",
133
+ "\n",
134
+ "## Load embedding model (last layer)\n",
135
+ "# clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n",
136
+ "# arch=\"ViT-bigG-14\",\n",
137
+ "# version=\"laion2b_s39b_b160k\",\n",
138
+ "# output_tokens=False,\n",
139
+ "# only_tokens=False,\n",
140
+ "# )\n",
141
+ "# clip_img_embedder.to(device)\n",
142
+ "# clip_seq_dim = 1\n",
143
+ "# clip_emb_dim = 1280"
144
+ ]
145
+ },
146
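This cell swaps MindEye2's FrozenOpenCLIPImageEmbedder (ViT-bigG-14, 256 tokens of 1664 dims) for a utils.CLIPEncoder wrapper that returns a single pooled ViT-H-14 embedding per image, hence clip_seq_dim = 1 and clip_emb_dim = 1024. The wrapper's code is not part of this diff; judging from the traceback later in this notebook (its __call__ batches images through encode_image and unsqueezes a token dimension), a rough open_clip-based sketch might look like the class below. This is an assumption for illustration, not the repository's actual implementation, and it skips CLIP's input normalization:

import torch
import open_clip

class MinimalCLIPEncoder:
    def __init__(self, model_name="ViT-H-14", pretrained="laion2b_s32b_b79k",
                 batch_size=20, device="cuda"):
        self.model, _, self.preprocess = open_clip.create_model_and_transforms(
            model_name, pretrained=pretrained, device=device)
        self.model.eval()
        self.batch_size = batch_size
        self.device = device

    @torch.no_grad()
    def __call__(self, images):  # images: [N, 3, 224, 224] tensor
        feats = []
        for i in range(0, len(images), self.batch_size):
            batch = images[i:i + self.batch_size].to(self.device)
            feats.append(self.model.encode_image(batch))
        return torch.cat(feats, dim=0).unsqueeze(1)  # [N, 1, 1024]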
+ {
147
+ "cell_type": "code",
148
+ "execution_count": 41,
149
+ "id": "1ffb659a-8154-4536-ab27-2d976da1bf4e",
150
+ "metadata": {
151
+ "tags": []
152
+ },
153
+ "outputs": [
154
+ {
155
+ "name": "stdout",
156
+ "output_type": "stream",
157
+ "text": [
158
+ "model_name: testing_MST_ViT-H_1024_scalf\n",
159
+ "--model_name=testing_MST_ViT-H_1024_scalf --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --all_recons_path=/home/ubuntu/real_time_mindEye2/evals/testing_MST_ViT-H_1024_scalf/testing_MST_ViT-H_1024_scalf_all_recons.pt --eval_dir=/home/ubuntu/real_time_mindEye2/evals/testing_MST_ViT-H_1024_scalf\n",
160
+ "The autoreload extension is already loaded. To reload it, use:\n",
161
+ " %reload_ext autoreload\n"
162
+ ]
163
+ }
164
+ ],
165
+ "source": [
166
+ "plot_all = False\n",
167
+ "compute_circular = False # for the circular tests looking at image similarity in clip space (without any brain data involved)\n",
168
+ "saving = True\n",
169
+ "\n",
170
+ "# if running this interactively, can specify jupyter_args here for argparser to use\n",
171
+ "if utils.is_interactive():\n",
172
+ " model_name = f\"testing_MST_ViT-H_1024_scalf\"\n",
173
+ " eval_dir = f\"/home/ubuntu/real_time_mindEye2/evals/{model_name}\" # f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\"\n",
174
+ " if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n",
175
+ " all_recons_path = f\"{eval_dir}/all_recons.pt\"\n",
176
+ " elif \"paul\" in model_name:\n",
177
+ " all_recons_path = f\"evals/{model_name}/{model_name}_all_recons.pt\"\n",
178
+ " else:\n",
179
+ " all_recons_path = f\"{eval_dir}/{model_name}_all_recons.pt\" \n",
180
+ "\n",
181
+ " data_path = \"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2\"\n",
182
+ " print(\"model_name:\", model_name)\n",
183
+ "\n",
184
+ " jupyter_args = f\"--model_name={model_name} --data_path={data_path} --all_recons_path={all_recons_path} --eval_dir={eval_dir}\"\n",
185
+ " print(jupyter_args)\n",
186
+ " jupyter_args = jupyter_args.split()\n",
187
+ " \n",
188
+ " from IPython.display import clear_output # function to clear print outputs in cell\n",
189
+ " %load_ext autoreload \n",
190
+ " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n",
191
+ " %autoreload 2 "
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "code",
196
+ "execution_count": 42,
197
+ "id": "fb8120cd-f226-4e2c-a6c5-3cd8ef6e9bc8",
198
+ "metadata": {
199
+ "tags": []
200
+ },
201
+ "outputs": [],
202
+ "source": [
203
+ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n",
204
+ "parser.add_argument(\n",
205
+ " \"--model_name\", type=str, default=\"testing\",\n",
206
+ " help=\"name of model, used for ckpt saving and wandb logging (if enabled)\",\n",
207
+ ")\n",
208
+ "parser.add_argument(\n",
209
+ " \"--data_path\", type=str, default=\"/weka/proj-fmri/shared/mindeyev2_dataset\",\n",
210
+ " help=\"Path to where NSD data is stored / where to download it to\",\n",
211
+ ")\n",
212
+ "parser.add_argument(\n",
213
+ " \"--all_recons_path\", type=str,\n",
214
+ " help=\"Path to where all_recons.pt is stored\",\n",
215
+ ")\n",
216
+ "\n",
217
+ "parser.add_argument(\n",
218
+ " \"--eval_dir\", type=str,\n",
219
+ " help=\"Path to where evaluations should be stored\",\n",
220
+ ")\n",
221
+ "\n",
222
+ "parser.add_argument(\n",
223
+ " \"--seed\",type=int,default=42,\n",
224
+ ")\n",
225
+ "if utils.is_interactive():\n",
226
+ " args = parser.parse_args(jupyter_args)\n",
227
+ "else:\n",
228
+ " args = parser.parse_args()\n",
229
+ "\n",
230
+ "# create global variables without the args prefix\n",
231
+ "for attribute_name in vars(args).keys():\n",
232
+ " globals()[attribute_name] = getattr(args, attribute_name)\n",
233
+ " \n",
234
+ "# seed all random functions\n",
235
+ "utils.seed_everything(seed)"
236
+ ]
237
+ },
238
+ {
239
+ "cell_type": "markdown",
240
+ "id": "95d66b33-b327-4895-a861-ecc6ccc51296",
241
+ "metadata": {
242
+ "tags": []
243
+ },
244
+ "source": [
245
+ "# Evals"
246
+ ]
247
+ },
248
+ {
249
+ "cell_type": "code",
250
+ "execution_count": 43,
251
+ "id": "be66f9c9-f25a-48d9-9e9a-272ab33d20ed",
252
+ "metadata": {
253
+ "tags": []
254
+ },
255
+ "outputs": [
256
+ {
257
+ "name": "stdout",
258
+ "output_type": "stream",
259
+ "text": [
260
+ "torch.Size([248, 3, 256, 256])\n",
261
+ "all_recons_path: /home/ubuntu/real_time_mindEye2/evals/testing_MST_ViT-H_1024_scalf/testing_MST_ViT-H_1024_scalf_all_recons.pt\n"
262
+ ]
263
+ }
264
+ ],
265
+ "source": [
266
+ "eval_captions = False\n",
267
+ "if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n",
268
+ " all_images = torch.load(f\"{eval_dir}/all_images.pt\")\n",
269
+ " all_clipvoxels = torch.load(f\"{eval_dir}/all_clipvoxels.pt\")\n",
270
+ " if eval_captions:\n",
271
+ " all_predcaptions = torch.load(f\"{eval_dir}/all_predcaptions.pt\")\n",
272
+ " all_unrefinedrecons = torch.load(f\"{eval_dir}/all_recons.pt\")\n",
273
+ "elif \"ses-01\" in model_name and \"paul\" in model_name:\n",
274
+ " all_images = torch.load(f\"evals/{model_name}/{model_name}_all_images.pt\")\n",
275
+ " all_clipvoxels = torch.load(f\"evals/{model_name}/{model_name}_all_clipvoxels.pt\")\n",
276
+ " if eval_captions:\n",
277
+ " all_predcaptions = torch.load(f\"evals/{model_name}/{model_name}_all_predcaptions.pt\")\n",
278
+ " all_unrefinedrecons = torch.load(f\"evals/{model_name}/{model_name}_all_recons.pt\")\n",
279
+ "else:\n",
280
+ " all_images = torch.load(f\"{eval_dir}/{model_name}_all_images.pt\") \n",
281
+ " all_clipvoxels = torch.load(f\"{eval_dir}/{model_name}_all_clipvoxels.pt\") \n",
282
+ " if eval_captions:\n",
283
+ " all_predcaptions = torch.load(f\"{eval_dir}/{model_name}_all_predcaptions.pt\") \n",
284
+ " all_unrefinedrecons = torch.load(f\"{eval_dir}/{model_name}_all_recons.pt\") \n",
285
+ "\n",
286
+ "print(all_images.shape)\n",
287
+ "print(\"all_recons_path:\", all_recons_path)\n",
288
+ "all_recons = torch.load(all_recons_path)\n",
289
+ "\n",
290
+ "# all_blurryrecons = torch.load(f\"{eval_dir}/all_blurryrecons.pt\")"
291
+ ]
292
+ },
293
+ {
294
+ "cell_type": "code",
295
+ "execution_count": 44,
296
+ "id": "4b6c44f9-95ac-4bac-9fbf-d0b31f4f127f",
297
+ "metadata": {},
298
+ "outputs": [],
299
+ "source": [
300
+ "# if \"ses-01\" in model_name:\n",
301
+ "# paul_all_images = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_images.pt\").to('cpu')\n",
302
+ "# paul_all_clipvoxels = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_clipvoxels.pt\").to('cpu')\n",
303
+ "# paul_all_recons = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_recons.pt\").to('cpu')\n",
304
+ "# # paul_all_prior_out = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_prior_out.pt\").to('cpu')\n",
305
+ "# # all_images = torch.load(f\"{eval_dir}/all_images.pt\") \n",
306
+ "# print(paul_all_images.shape, all_images.shape)\n",
307
+ "# print(paul_all_clipvoxels.shape, all_clipvoxels.shape)\n",
308
+ "# print(torch.eq(paul_all_clipvoxels, all_clipvoxels))\n",
309
+ "# # assert torch.allclose(paul_all_images, all_images)"
310
+ ]
311
+ },
312
+ {
313
+ "cell_type": "code",
314
+ "execution_count": 45,
315
+ "id": "e8f29ac6-6561-4770-8837-8877488dce05",
316
+ "metadata": {
317
+ "tags": []
318
+ },
319
+ "outputs": [],
320
+ "source": [
321
+ "# for i in range(100):\n",
322
+ "# # print(torch.allclose(paul_all_images[i], all_images[i]))\n",
323
+ "# pass"
324
+ ]
325
+ },
326
+ {
327
+ "cell_type": "code",
328
+ "execution_count": 46,
329
+ "id": "a1382548-4b43-4101-a15b-e67b1225059c",
330
+ "metadata": {},
331
+ "outputs": [],
332
+ "source": [
333
+ "# num_images = paul_all_images.size(0)\n",
334
+ "# rows = 10 # Number of rows for the grid\n",
335
+ "# cols = 10 # Number of columns for the grid\n",
336
+ "\n",
337
+ "# fig, axes = plt.subplots(rows, cols * 2, figsize=(80, 40))\n",
338
+ "\n",
339
+ "# for i in range(num_images):\n",
340
+ "# row = i // cols\n",
341
+ "# col = (i % cols) * 2 # Adjust for side-by-side\n",
342
+ " \n",
343
+ "# # Plot correct image\n",
344
+ "# ax_correct = axes[row, col]\n",
345
+ "# ax_correct.imshow(paul_all_recons[i].permute(1, 2, 0).cpu().numpy())\n",
346
+ "# ax_correct.axis('off')\n",
347
+ "# ax_correct.set_title(f\"Correct {i}\")\n",
348
+ " \n",
349
+ "# # Plot modified image\n",
350
+ "# ax_modified = axes[row, col + 1]\n",
351
+ "# ax_modified.imshow(all_recons[i].permute(1, 2, 0).cpu().numpy())\n",
352
+ "# ax_modified.axis('off')\n",
353
+ "# ax_modified.set_title(f\"Modified {i}\")\n",
354
+ "\n",
355
+ "# plt.tight_layout()\n",
356
+ "# plt.show()"
357
+ ]
358
+ },
359
+ {
360
+ "cell_type": "code",
361
+ "execution_count": 47,
362
+ "id": "4cc551db-85c3-4696-a6fd-52b0092669eb",
363
+ "metadata": {
364
+ "tags": []
365
+ },
366
+ "outputs": [
367
+ {
368
+ "name": "stdout",
369
+ "output_type": "stream",
370
+ "text": [
371
+ "testing_MST_ViT-H_1024_scalf_all_recons.pt\n",
372
+ "torch.Size([248, 3, 256, 256]) torch.Size([248, 3, 256, 256])\n"
373
+ ]
374
+ }
375
+ ],
376
+ "source": [
377
+ "model_name_plus_suffix = all_recons_path.split('/')[-1]\n",
378
+ "print(model_name_plus_suffix)\n",
379
+ "print(all_images.shape, all_recons.shape)"
380
+ ]
381
+ },
382
+ {
383
+ "cell_type": "code",
384
+ "execution_count": 49,
385
+ "id": "22e933c4-1eed-48a7-a22f-84a4606ac253",
386
+ "metadata": {},
387
+ "outputs": [
388
+ {
389
+ "name": "stdout",
390
+ "output_type": "stream",
391
+ "text": [
392
+ "testing_MST_ViT-H_1024_scalf (31, 2) torch.Size([62, 3, 256, 256]) torch.Size([62, 1, 1024])\n"
393
+ ]
394
+ }
395
+ ],
396
+ "source": [
397
+ "if \"MST\" in model_name:\n",
398
+ " if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name or \"rishab\" in model_name:\n",
399
+ " MST_ID = np.load(f\"{eval_dir}/MST_ID.npy\")\n",
400
+ " MST_pairmate_indices = np.load(f\"{eval_dir}/MST_pairmate_indices.npy\")\n",
401
+ " elif \"paul\" in model_name:\n",
402
+ " MST_ID = np.load(f\"evals/{model_name}/{model_name}_MST_ID.npy\")\n",
403
+ " MST_pairmate_indices = np.array(utils.find_paired_indices(torch.Tensor(MST_ID)))\n",
404
+ " # print(MST_pairmate_indices)\n",
405
+ " else:\n",
406
+ " MST_ID = np.load(f\"{eval_dir}/{model_name}_MST_ID.npy\") \n",
407
+ " MST_pairmate_indices = np.load(f\"{eval_dir}/{model_name}_MST_pairmate_indices.npy\") \n",
408
+ "\n",
409
+ " # pairs = utils.find_paired_indices(torch.Tensor(MST_ID))\n",
410
+ " # if \"close_to_MST\" in model_name or (\"remove\" in model_name and \"random\" in model_name) or \"ses-0\" in model_name:\n",
411
+ " # pairs = np.array(pairs[:-1]) # index out the placeholder\n",
412
+ " # pairs = np.array(pairs)\n",
413
+ " # if \"ses-0\" in model_name:\n",
414
+ " # if \"ses-01\" in model_name or \"ses-04\" in model_name:\n",
415
+ " # print(pairs.shape)\n",
416
+ " # assert pairs.shape == (49,2)\n",
417
+ " # else:\n",
418
+ " # assert pairs.shape == (50,3)\n",
419
+ " # else:\n",
420
+ " # assert pairs.shape == (100,3)\n",
421
+ " # print(pairs)\n",
422
+ " # repeats_in_test = torch.load(f\"{eval_dir}/repeats_in_test.pt\")\n",
423
+ " # test_image_indices = torch.load(f\"{eval_dir}/test_image_indices.pt\")\n",
424
+ " all_unique_images = all_images[MST_pairmate_indices.flatten()]\n",
425
+ " all_unique_clipvoxels = all_clipvoxels[MST_pairmate_indices.flatten()]\n",
426
+ "\n",
427
+ " print(model_name, MST_pairmate_indices.shape, all_unique_images.shape, all_unique_clipvoxels.shape)"
428
+ ]
429
+ },
430
+ {
431
+ "cell_type": "code",
432
+ "execution_count": 50,
433
+ "id": "880081e4-7567-4b1e-acb0-4af863018228",
434
+ "metadata": {
435
+ "tags": []
436
+ },
437
+ "outputs": [],
438
+ "source": [
439
+ "# visualize all unique images\n",
440
+ "if plot_all:\n",
441
+ " # Plot all the MST images and pairmates\n",
442
+ " import textwrap\n",
443
+ " def wrap_title(title, wrap_width):\n",
444
+ " return \"\\n\".join(textwrap.wrap(title, wrap_width))\n",
445
+ "\n",
446
+ " size = int(np.ceil(MST_pairmate_indices.shape[0]/2)) # helps determine size of plot\n",
447
+ " fig, axes = plt.subplots(size, 4, figsize=(15, size*4))\n",
448
+ " jj=-1; kk=0;\n",
449
+ " for i, j in enumerate(all_unique_images):\n",
450
+ " jj+=1\n",
451
+ " axes[kk][jj].imshow(utils.torch_to_Image(j))\n",
452
+ " axes[kk][jj].axis('off')\n",
453
+ " if jj==3: \n",
454
+ " kk+=1; jj=-1\n",
455
+ "\n",
456
+ " fig.tight_layout()\n",
457
+ " # plt.savefig('figures/MST_2_pairmates_10-01')\n",
458
+ " plt.show()"
459
+ ]
460
+ },
461
+ {
462
+ "cell_type": "code",
463
+ "execution_count": 51,
464
+ "id": "48c8772b-871a-4031-b013-d7159bf8b74a",
465
+ "metadata": {
466
+ "tags": []
467
+ },
468
+ "outputs": [],
469
+ "source": [
470
+ "# if plot_all:\n",
471
+ "# # create full grid of recon comparisons\n",
472
+ "# from PIL import Image\n",
473
+ "\n",
474
+ "# imsize = 150\n",
475
+ "# if all_images.shape[-1] != imsize:\n",
476
+ "# all_images = transforms.Resize((imsize,imsize))(all_images).float()\n",
477
+ "# if all_recons.shape[-1] != imsize:\n",
478
+ "# all_recons = transforms.Resize((imsize,imsize))(all_recons).float()\n",
479
+ "\n",
480
+ "# num_images = all_recons.shape[0]\n",
481
+ "# num_rows = (2 * num_images + 9) // 10\n",
482
+ "\n",
483
+ "# # Interleave tensors\n",
484
+ "# merged = torch.stack([val for pair in zip(all_images, all_recons) for val in pair], dim=0)\n",
485
+ "\n",
486
+ "# # Calculate grid size\n",
487
+ "# grid = torch.zeros((num_rows * 10, 3, all_recons.shape[-1], all_recons.shape[-1]))\n",
488
+ "\n",
489
+ "# # Populate the grid\n",
490
+ "# grid[:2*num_images] = merged\n",
491
+ "# grid_images = [transforms.functional.to_pil_image(grid[i]) for i in range(num_rows * 10)]\n",
492
+ "\n",
493
+ "# # Create the grid image\n",
494
+ "# grid_image = Image.new('RGB', (all_recons.shape[-1]*10, all_recons.shape[-1] * num_rows)) # 10 images wide\n",
495
+ "\n",
496
+ "# # Paste images into the grid\n",
497
+ "# for i, img in enumerate(grid_images):\n",
498
+ "# grid_image.paste(img, (all_recons.shape[-1] * (i % 10), all_recons.shape[-1] * (i // 10)))\n",
499
+ "# grid_image\n",
500
+ "# # grid_image.save(f\"{model_name_plus_suffix[:-3]}_1000recons.png\")"
501
+ ]
502
+ },
503
+ {
504
+ "cell_type": "code",
505
+ "execution_count": 52,
506
+ "id": "f42009e9-f910-4f02-8db6-d46778aa6595",
507
+ "metadata": {
508
+ "tags": []
509
+ },
510
+ "outputs": [],
511
+ "source": [
512
+ "imsize = 256\n",
513
+ "if all_images.shape[-1] != imsize:\n",
514
+ " all_images = transforms.Resize((imsize,imsize))(all_images).float()\n",
515
+ "if all_recons.shape[-1] != imsize:\n",
516
+ " all_recons = transforms.Resize((imsize,imsize))(all_recons).float()\n",
517
+ "try:\n",
518
+ " if all_blurryrecons.shape[-1] != imsize:\n",
519
+ " all_blurryrecons = transforms.Resize((imsize,imsize))(all_blurryrecons).float()\n",
520
+ "except:\n",
521
+ " pass\n",
522
+ "\n",
523
+ "if \"enhanced\" in model_name_plus_suffix:\n",
524
+ " try:\n",
525
+ " all_recons = all_recons*.75 + all_blurryrecons*.25\n",
526
+ " print(\"weighted averaging to improve low-level evals\")\n",
527
+ " except:\n",
528
+ " pass"
529
+ ]
530
+ },
531
+ {
532
+ "cell_type": "code",
533
+ "execution_count": 53,
534
+ "id": "06e23174-a777-4dd1-8b73-6783587d8f9c",
535
+ "metadata": {
536
+ "tags": []
537
+ },
538
+ "outputs": [],
539
+ "source": [
540
+ "# visualize some images with recons and captions\n",
541
+ "if plot_all:\n",
542
+ " assert np.all(all_images.shape == all_recons.shape)\n",
543
+ " import textwrap\n",
544
+ " def wrap_title(title, wrap_width):\n",
545
+ " return \"\\n\".join(textwrap.wrap(title, wrap_width))\n",
546
+ "\n",
547
+ " fig, axes = plt.subplots(3, 4, figsize=(10, 8))\n",
548
+ " jj=-1; kk=0;\n",
549
+ " for j in np.array([0,1,2,3,4,5]):\n",
550
+ " jj+=1\n",
551
+ " # print(kk,jj)\n",
552
+ " axes[kk][jj].imshow(utils.torch_to_Image(all_images[j]))\n",
553
+ " axes[kk][jj].axis('off')\n",
554
+ " jj+=1\n",
555
+ " axes[kk][jj].imshow(utils.torch_to_Image(all_recons[j]))\n",
556
+ " axes[kk][jj].axis('off')\n",
557
+ " axes[kk][jj].set_title(wrap_title(str(all_predcaptions[[j]]),wrap_width=30), fontsize=8)\n",
558
+ " if jj==3: \n",
559
+ " kk+=1; jj=-1\n",
560
+ "\n",
561
+ " fig.tight_layout()\n",
562
+ " # plt.savefig('figures/recon_09-26')\n",
563
+ " plt.show()"
564
+ ]
565
+ },
566
+ {
567
+ "cell_type": "markdown",
568
+ "id": "5b4deb53-4d85-4292-92c5-bb59077523cf",
569
+ "metadata": {},
570
+ "source": [
571
+ "# Retrieval eval (chance = 1/100)"
572
+ ]
573
+ },
574
+ {
575
+ "cell_type": "code",
576
+ "execution_count": 54,
577
+ "id": "2e49d57a-9b65-490c-a1e0-61bd05682171",
578
+ "metadata": {
579
+ "tags": []
580
+ },
581
+ "outputs": [
582
+ {
583
+ "name": "stdout",
584
+ "output_type": "stream",
585
+ "text": [
586
+ "overall fwd percent_correct: 0.0484\n",
587
+ "overall bwd percent_correct: 0.0161\n"
588
+ ]
589
+ },
590
+ {
591
+ "name": "stderr",
592
+ "output_type": "stream",
593
+ "text": [
594
+ "/home/ubuntu/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
595
+ " warnings.warn(\n"
596
+ ]
597
+ }
598
+ ],
599
+ "source": [
600
+ "from scipy import stats\n",
601
+ "\n",
602
+ "all_fwd_acc = []\n",
603
+ "all_bwd_acc = []\n",
604
+ "\n",
605
+ "assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
606
+ "\n",
607
+ "all_percent_correct_fwds, all_percent_correct_bwds = [], []\n",
608
+ "\n",
609
+ "with torch.cuda.amp.autocast(dtype=torch.float16):\n",
610
+ " all_unique_images_resized = transforms.Resize((224,224))(all_unique_images).float() # embedder requires this shape\n",
611
+ " all_emb = clip_img_embedder(all_unique_images_resized.to(device)).float() # CLIP-Image\n",
612
+ " all_emb_ = all_unique_clipvoxels # CLIP-Brain\n",
613
+ "\n",
614
+ " # flatten if necessary\n",
615
+ " all_emb = all_emb.reshape(len(all_emb),-1).to(device)\n",
616
+ " all_emb_ = all_emb_.reshape(len(all_emb_),-1).to(device)\n",
617
+ "\n",
618
+ " # l2norm \n",
619
+ " all_emb = nn.functional.normalize(all_emb,dim=-1)\n",
620
+ " all_emb_ = nn.functional.normalize(all_emb_,dim=-1)\n",
621
+ "\n",
622
+ " all_labels = torch.arange(len(all_emb)).to(device)\n",
623
+ " all_bwd_sim = utils.batchwise_cosine_similarity(all_emb,all_emb_) # clip, brain\n",
624
+ " all_fwd_sim = utils.batchwise_cosine_similarity(all_emb_,all_emb) # brain, clip\n",
625
+ "\n",
626
+ " # if \"ses-0\" not in model_name or \"ses-01\" in model_name or \"ses-04\" in model_name:\n",
627
+ " # assert len(all_fwd_sim) == 100\n",
628
+ " # assert len(all_bwd_sim) == 100\n",
629
+ " # else:\n",
630
+ " # assert len(all_fwd_sim) == 50\n",
631
+ " # assert len(all_bwd_sim) == 50\n",
632
+ " \n",
633
+ " all_percent_correct_fwds = utils.topk(all_fwd_sim, all_labels, k=1).item()\n",
634
+ " all_percent_correct_bwds = utils.topk(all_bwd_sim, all_labels, k=1).item()\n",
635
+ "\n",
636
+ "all_fwd_acc.append(all_percent_correct_fwds)\n",
637
+ "all_bwd_acc.append(all_percent_correct_bwds)\n",
638
+ "\n",
639
+ "all_fwd_sim = np.array(all_fwd_sim.cpu())\n",
640
+ "all_bwd_sim = np.array(all_bwd_sim.cpu())\n",
641
+ "\n",
642
+ "print(f\"overall fwd percent_correct: {all_fwd_acc[0]:.4f}\")\n",
643
+ "print(f\"overall bwd percent_correct: {all_bwd_acc[0]:.4f}\")"
644
+ ]
645
+ },
646
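The next cell fails with "The size of tensor a (325) must match the size of tensor b (257)": the per-half images are fed to the ViT-H/14 encoder at 256x256, which produces 18x18 + 1 = 325 tokens, while the model's positional embedding expects the 16x16 + 1 = 257 tokens of a 224x224 input. The full-set retrieval cell above avoids this by resizing first ("embedder requires this shape"). A hedged fix for the per-half loop, reusing that cell's names:

from torchvision import transforms

all_unique_images_half_resized = transforms.Resize((224, 224))(all_unique_images_half).float()
with torch.cuda.amp.autocast(dtype=torch.float16):
    emb = clip_img_embedder(all_unique_images_half_resized.to(device)).float()  # CLIP-Image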
+ {
647
+ "cell_type": "code",
648
+ "execution_count": 55,
649
+ "id": "1a4cda50-ef4a-43d0-9c2e-53ee8509482d",
650
+ "metadata": {
651
+ "tags": []
652
+ },
653
+ "outputs": [
654
+ {
655
+ "ename": "RuntimeError",
656
+ "evalue": "The size of tensor a (325) must match the size of tensor b (257) at non-singleton dimension 1",
657
+ "output_type": "error",
658
+ "traceback": [
659
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
660
+ "\u001b[31mRuntimeError\u001b[39m Traceback (most recent call last)",
661
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[55]\u001b[39m\u001b[32m, line 24\u001b[39m\n\u001b[32m 20\u001b[39m all_unique_clipvoxels_half = all_unique_clipvoxels[\u001b[38;5;28mint\u001b[39m(\u001b[38;5;28mlen\u001b[39m(all_unique_clipvoxels)/\u001b[32m2\u001b[39m):]\n\u001b[32m 23\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m torch.cuda.amp.autocast(dtype=torch.float16):\n\u001b[32m---> \u001b[39m\u001b[32m24\u001b[39m emb = \u001b[43mclip_img_embedder\u001b[49m\u001b[43m(\u001b[49m\u001b[43mall_unique_images_half\u001b[49m\u001b[43m.\u001b[49m\u001b[43mto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m.float() \u001b[38;5;66;03m# CLIP-Image\u001b[39;00m\n\u001b[32m 25\u001b[39m emb_ = all_unique_clipvoxels_half \u001b[38;5;66;03m# CLIP-Brain\u001b[39;00m\n\u001b[32m 27\u001b[39m \u001b[38;5;66;03m# flatten if necessary\u001b[39;00m\n",
662
+ "\u001b[36mFile \u001b[39m\u001b[32m~/real_time_mindEye2/utils.py:1151\u001b[39m, in \u001b[36mCLIPEncoder.__call__\u001b[39m\u001b[34m(self, image)\u001b[39m\n\u001b[32m 1150\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, image):\n\u001b[32m-> \u001b[39m\u001b[32m1151\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mencode_image\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimage\u001b[49m\u001b[43m)\u001b[49m.unsqueeze(\u001b[32m1\u001b[39m)\n",
663
+ "\u001b[36mFile \u001b[39m\u001b[32m~/real_time_mindEye2/utils.py:1142\u001b[39m, in \u001b[36mCLIPEncoder.encode_image\u001b[39m\u001b[34m(self, image, verbose)\u001b[39m\n\u001b[32m 1140\u001b[39m batch_images = torch.stack(batch_images)\n\u001b[32m 1141\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m torch.no_grad():\n\u001b[32m-> \u001b[39m\u001b[32m1142\u001b[39m batch_features = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m.\u001b[49m\u001b[43mencode_image\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 1143\u001b[39m \u001b[43m \u001b[49m\u001b[43mbatch_images\u001b[49m\u001b[43m.\u001b[49m\u001b[43mto\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1144\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1145\u001b[39m features.append(batch_features)\n\u001b[32m 1147\u001b[39m features = torch.cat(features, dim=\u001b[32m0\u001b[39m)\n",
664
+ "\u001b[36mFile \u001b[39m\u001b[32m~/rt_mindEye2/lib/python3.11/site-packages/open_clip/model.py:266\u001b[39m, in \u001b[36mCLIP.encode_image\u001b[39m\u001b[34m(self, image, normalize)\u001b[39m\n\u001b[32m 265\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mencode_image\u001b[39m(\u001b[38;5;28mself\u001b[39m, image, normalize: \u001b[38;5;28mbool\u001b[39m = \u001b[38;5;28;01mFalse\u001b[39;00m):\n\u001b[32m--> \u001b[39m\u001b[32m266\u001b[39m features = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvisual\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimage\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 267\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m F.normalize(features, dim=-\u001b[32m1\u001b[39m) \u001b[38;5;28;01mif\u001b[39;00m normalize \u001b[38;5;28;01melse\u001b[39;00m features\n",
665
+ "\u001b[36mFile \u001b[39m\u001b[32m~/rt_mindEye2/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m 1516\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m 1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
666
+ "\u001b[36mFile \u001b[39m\u001b[32m~/rt_mindEye2/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m 1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m 1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m 1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m 1525\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m 1526\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m 1530\u001b[39m result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
667
+ "\u001b[36mFile \u001b[39m\u001b[32m~/rt_mindEye2/lib/python3.11/site-packages/open_clip/transformer.py:616\u001b[39m, in \u001b[36mVisionTransformer.forward\u001b[39m\u001b[34m(self, x)\u001b[39m\n\u001b[32m 614\u001b[39m x = torch.cat([_expand_token(\u001b[38;5;28mself\u001b[39m.class_embedding, x.shape[\u001b[32m0\u001b[39m]).to(x.dtype), x], dim=\u001b[32m1\u001b[39m)\n\u001b[32m 615\u001b[39m \u001b[38;5;66;03m# shape = [*, grid ** 2 + 1, width]\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m616\u001b[39m x = \u001b[43mx\u001b[49m\u001b[43m \u001b[49m\u001b[43m+\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mpositional_embedding\u001b[49m\u001b[43m.\u001b[49m\u001b[43mto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m.\u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 618\u001b[39m x = \u001b[38;5;28mself\u001b[39m.patch_dropout(x)\n\u001b[32m 619\u001b[39m x = \u001b[38;5;28mself\u001b[39m.ln_pre(x)\n",
668
+ "\u001b[31mRuntimeError\u001b[39m: The size of tensor a (325) must match the size of tensor b (257) at non-singleton dimension 1"
669
+ ]
670
+ }
671
+ ],
672
+ "source": [
673
+ "if \"ses-0\" not in model_name:\n",
674
+ " from scipy import stats\n",
675
+ "\n",
676
+ " fwd_acc = []\n",
677
+ " bwd_acc = []\n",
678
+ " fwd_sim_halves = []\n",
679
+ " bwd_sim_halves = []\n",
680
+ "\n",
681
+ " assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
682
+ "\n",
683
+ " for i in range(2): # since this is a 2-session model, we evaluate on the test set corresponding to each session and report both separately for better comparison to single-session models\n",
684
+ " percent_correct_fwds, percent_correct_bwds = [], []\n",
685
+ " # percent_correct_fwd, percent_correct_bwd = None, None\n",
686
+ "\n",
687
+ " if i==0: \n",
688
+ " all_unique_images_half = all_unique_images[:int(len(all_unique_images)/2)]\n",
689
+ " all_unique_clipvoxels_half = all_unique_clipvoxels[:int(len(all_unique_clipvoxels)/2)]\n",
690
+ " elif i==1:\n",
691
+ " all_unique_images_half = all_unique_images[int(len(all_unique_images)/2):]\n",
692
+ " all_unique_clipvoxels_half = all_unique_clipvoxels[int(len(all_unique_clipvoxels)/2):]\n",
693
+ "\n",
694
+ "\n",
695
+ " with torch.cuda.amp.autocast(dtype=torch.float16):\n",
696
+ " emb = clip_img_embedder(all_unique_images_half.to(device)).float() # CLIP-Image\n",
697
+ " emb_ = all_unique_clipvoxels_half # CLIP-Brain\n",
698
+ "\n",
699
+ " # flatten if necessary\n",
700
+ " emb = emb.reshape(len(emb),-1).to(device)\n",
701
+ " emb_ = emb_.reshape(len(emb_),-1).to(device)\n",
702
+ "\n",
703
+ " # l2norm \n",
704
+ " emb = nn.functional.normalize(emb,dim=-1)\n",
705
+ " emb_ = nn.functional.normalize(emb_,dim=-1)\n",
706
+ "\n",
707
+ " labels = torch.arange(len(emb)).to(device)\n",
708
+ " bwd_sim = utils.batchwise_cosine_similarity(emb,emb_) # clip, brain\n",
709
+ " fwd_sim = utils.batchwise_cosine_similarity(emb_,emb) # brain, clip\n",
710
+ "\n",
711
+ " assert len(fwd_sim) == 50\n",
712
+ " assert len(bwd_sim) == 50\n",
713
+ "\n",
714
+ " # percent_correct_fwds = np.append(percent_correct_fwds, utils.topk(fwd_sim, labels, k=1).item())\n",
715
+ " # percent_correct_bwds = np.append(percent_correct_bwds, utils.topk(bwd_sim, labels, k=1).item())\n",
716
+ " percent_correct_fwds = utils.topk(fwd_sim, labels, k=1).item()\n",
717
+ " percent_correct_bwds = utils.topk(bwd_sim, labels, k=1).item()\n",
718
+ "\n",
719
+ " # percent_correct_fwd = np.mean(percent_correct_fwds)\n",
720
+ " # fwd_sd = np.std(percent_correct_fwds) / np.sqrt(len(percent_correct_fwds))\n",
721
+ " # fwd_ci = stats.norm.interval(0.95, loc=percent_correct_fwd, scale=fwd_sd)\n",
722
+ "\n",
723
+ " # percent_correct_bwd = np.mean(percent_correct_bwds)\n",
724
+ " # bwd_sd = np.std(percent_correct_bwds) / np.sqrt(len(percent_correct_bwds))\n",
725
+ " # bwd_ci = stats.norm.interval(0.95, loc=percent_correct_bwd, scale=bwd_sd)\n",
726
+ "\n",
727
+ " fwd_acc.append(percent_correct_fwds)\n",
728
+ " bwd_acc.append(percent_correct_bwds)\n",
729
+ "\n",
730
+ " fwd_sim = np.array(fwd_sim.cpu())\n",
731
+ " bwd_sim = np.array(bwd_sim.cpu())\n",
732
+ " fwd_sim_halves.append(fwd_sim)\n",
733
+ " bwd_sim_halves.append(bwd_sim)\n",
734
+ "\n",
735
+ " print(f\"ses-02 fwd percent_correct: {fwd_acc[0]:.4f}; ses-03 fwd percent_correct: {fwd_acc[1]:.4f}\")\n",
736
+ " print(f\"ses-02 bwd percent_correct: {bwd_acc[0]:.4f}; ses-03 bwd percent_correct: {bwd_acc[1]:.4f} \")"
737
+ ]
738
+ },
739
+ {
740
+ "cell_type": "code",
741
+ "execution_count": 56,
742
+ "id": "40d43a70-e1db-43e5-ae84-40eb1577cf01",
743
+ "metadata": {
744
+ "tags": []
745
+ },
746
+ "outputs": [],
747
+ "source": [
748
+ "if compute_circular:\n",
749
+ " if \"ses-0\" not in model_name: # we're in a multisession model, assumed ses-02 and ses-03 for now\n",
750
+ " fwd_acc_circular = []\n",
751
+ " fwd_sim_halves_circular = []\n",
752
+ "\n",
753
+ " assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
754
+ "\n",
755
+ " for i in range(2): # since this is a 2-session model, we evaluate on the test set corresponding to each session and report both separately for better comparison to single-session models\n",
756
+ " percent_correct_fwds_circular = []\n",
757
+ " # percent_correct_fwd_circular = None\n",
758
+ "\n",
759
+ " if i==0: \n",
760
+ " all_unique_images_half_circular = all_unique_images[:int(len(all_unique_images)/2)]\n",
761
+ " all_unique_clipvoxels_half_circular = all_unique_clipvoxels[:int(len(all_unique_clipvoxels)/2)]\n",
762
+ " elif i==1:\n",
763
+ " all_unique_images_half_circular = all_unique_images[int(len(all_unique_images)/2):]\n",
764
+ " all_unique_clipvoxels_half_circular = all_unique_clipvoxels[int(len(all_unique_clipvoxels)/2):]\n",
765
+ "\n",
766
+ " with torch.cuda.amp.autocast(dtype=torch.float16):\n",
767
+ " emb_circular = clip_img_embedder(all_unique_images_half_circular.to(device)).float() # CLIP-Image\n",
768
+ "\n",
769
+ " # flatten if necessary\n",
770
+ " emb_circular = emb_circular.reshape(len(emb_circular),-1).to(device)\n",
771
+ "\n",
772
+ " # l2norm \n",
773
+ " emb_circular = nn.functional.normalize(emb_circular,dim=-1)\n",
774
+ "\n",
775
+ " labels_circular = torch.arange(len(emb_circular)).to(device)\n",
776
+ " fwd_sim_circular = utils.batchwise_cosine_similarity(emb_circular,emb_circular) # clip, clip\n",
777
+ "\n",
778
+ "\n",
779
+ " if \"ses-0\" in model_name:\n",
780
+ " assert len(fwd_sim_circular) == 25\n",
781
+ " else:\n",
782
+ " assert len(fwd_sim_circular) == 50\n",
783
+ "\n",
784
+ " # percent_correct_fwds_circular = np.append(percent_correct_fwds_circular, utils.topk(fwd_sim_circular, labels_circular, k=1).item())\n",
785
+ " percent_correct_fwds_circular = utils.topk(fwd_sim_circular, labels_circular, k=1).item()\n",
786
+ "\n",
787
+ "\n",
788
+ " # percent_correct_fwd_circular = np.mean(percent_correct_fwds_circular)\n",
789
+ " # fwd_sd_circular = np.std(percent_correct_fwds_circular) / np.sqrt(len(percent_correct_fwds_circular))\n",
790
+ " # fwd_ci_circular = stats.norm.interval(0.95, loc=percent_correct_fwd_circular, scale=fwd_sd_circular)\n",
791
+ "\n",
792
+ " fwd_acc_circular.append(percent_correct_fwds_circular)\n",
793
+ "\n",
794
+ " fwd_sim_circular = np.array(fwd_sim_circular.cpu())\n",
795
+ " fwd_sim_halves_circular.append(fwd_sim_circular)\n",
796
+ "\n",
797
+ " print(f\"ses-02 fwd percent_correct: {fwd_acc_circular[0]:.4f}; ses-03 fwd percent_correct: {fwd_acc_circular[1]:.4f}\")\n",
798
+ " \n",
799
+ " else: # single session model\n",
800
+ " fwd_acc_circular = []\n",
801
+ "\n",
802
+ " assert len(all_unique_images) == len(all_unique_clipvoxels) \n",
803
+ "\n",
804
+ " percent_correct_fwds_circular = []\n",
805
+ " # percent_correct_fwd_circular = None\n",
806
+ "\n",
807
+ " with torch.cuda.amp.autocast(dtype=torch.float16):\n",
808
+ " emb_circular = clip_img_embedder(all_unique_images.to(device)).float() # CLIP-Image\n",
809
+ "\n",
810
+ " # flatten if necessary\n",
811
+ " emb_circular = emb_circular.reshape(len(emb_circular),-1).to(device)\n",
812
+ "\n",
813
+ " # l2norm \n",
814
+ " emb_circular = nn.functional.normalize(emb_circular,dim=-1)\n",
815
+ "\n",
816
+ " labels_circular = torch.arange(len(emb_circular)).to(device)\n",
817
+ " fwd_sim_circular = utils.batchwise_cosine_similarity(emb_circular,emb_circular) # clip, clip\n",
818
+ "\n",
819
+ "\n",
820
+ " if \"ses-01\" in model_name:\n",
821
+ " assert len(fwd_sim_circular) == 100\n",
822
+ " else:\n",
823
+ " assert len(fwd_sim_circular) == 50\n",
824
+ "\n",
825
+ " # percent_correct_fwds_circular = np.append(percent_correct_fwds_circular, utils.topk(fwd_sim_circular, labels_circular, k=1).item())\n",
826
+ " percent_correct_fwds_circular = utils.topk(fwd_sim_circular, labels_circular, k=1).item()\n",
827
+ "\n",
828
+ "\n",
829
+ " # percent_correct_fwd_circular = np.mean(percent_correct_fwds_circular)\n",
830
+ " # fwd_sd_circular = np.std(percent_correct_fwds_circular) / np.sqrt(len(percent_correct_fwds_circular))\n",
831
+ " # fwd_ci_circular = stats.norm.interval(0.95, loc=percent_correct_fwd_circular, scale=fwd_sd_circular)\n",
832
+ "\n",
833
+ " fwd_acc_circular.append(percent_correct_fwds_circular)\n",
834
+ "\n",
835
+ " fwd_sim_circular = np.array(fwd_sim_circular.cpu())\n",
836
+ "\n",
837
+ " print(f\"session fwd percent_correct (circular): {fwd_acc_circular[0]:.4f}\")"
838
+ ]
839
+ },
840
+ {
841
+ "cell_type": "code",
842
+ "execution_count": 57,
843
+ "id": "d086c99a-c712-4ac7-b625-a0656c9ee886",
844
+ "metadata": {
845
+ "tags": []
846
+ },
847
+ "outputs": [],
848
+ "source": [
849
+ "if compute_circular:\n",
850
+ " # print(utils.topk(torch.Tensor(fwd_sim_halves[1]).to(device), labels, k=1).item())\n",
851
+ " # ses02_top1 = torch.argsort(torch.Tensor(fwd_sim_halves[0]).to(device),axis=1)[:,-1] == labels # from utils.topk()\n",
852
+ " # ses03_top1 = torch.argsort(torch.Tensor(fwd_sim_halves[1]).to(device),axis=1)[:,-1] == labels\n",
853
+ " # top1_results = torch.cat((ses02_top1, ses03_top1))\n",
854
+ " # incorrect_idx = torch.argwhere(top1_results == False)[:,0]\n",
855
+ " # print(incorrect_idx)\n",
856
+ "\n",
857
+ " # confirm that the lure is behind the target 80% of the time in CLIP last hidden layer embeddings\n",
858
+ "\n",
859
+ " use_fwd_sim = False\n",
860
+ " use_first_half = False\n",
861
+ "\n",
862
+ " all_top_sims = [] # len(fwd_sim_halves[0]); for each image, contains the similarity to the top-n choices until it gets the correct answer. If len 1, top-1 is correct\n",
863
+ " all_pairmate_sims = [] # len(fwd_sim_halves[0]); the similarity of each image to its pairmate \n",
864
+ " all_chose_lures = [] # len(fwd_sim_halves[0]); True for each top-n choice if the lure was predicted to be more similar to the target \n",
865
+ " if \"ses-0\" not in model_name:\n",
866
+ " sim_halves = fwd_sim_halves if use_fwd_sim else bwd_sim_halves\n",
867
+ " sim_halves = sim_halves[0] if use_first_half else sim_halves[1]\n",
868
+ " else:\n",
869
+ " sim_halves = all_fwd_sim if use_fwd_sim else all_bwd_sim\n",
870
+ " for i, img in enumerate(sim_halves):\n",
871
+ " if i%2==0:\n",
872
+ " idx_to_pairmate = 1\n",
873
+ " elif i%2==1:\n",
874
+ " idx_to_pairmate = -1\n",
875
+ "\n",
876
+ " order = img.argsort()[::-1]\n",
877
+ " # print(order)\n",
878
+ " top_sim = []\n",
879
+ " chose_lure = []\n",
880
+ " for idx in order:\n",
881
+ " sim = img[idx]\n",
882
+ " pairmate_sim = img[i+idx_to_pairmate]\n",
883
+ " top_sim.append(sim) \n",
884
+ " chose_lure.append((idx, sim <= pairmate_sim))\n",
885
+ " # print(i, idx, img[idx], img[i+idx_to_pairmate])\n",
886
+ " if idx == i:\n",
887
+ " break\n",
888
+ "\n",
889
+ " all_top_sims.append(top_sim)\n",
890
+ " all_pairmate_sims.append(pairmate_sim)\n",
891
+ " all_chose_lures.append(chose_lure)\n",
892
+ "\n",
893
+ " # print(all_top_sims)\n",
894
+ " # print()\n",
895
+ " # print(all_pairmate_sims)\n",
896
+ " # print()\n",
897
+ " # print(all_chose_lures)\n",
898
+ "\n",
899
+ " where_chose_pairmate = []\n",
900
+ " for idx, i in enumerate(all_chose_lures):\n",
901
+ " for value in i:\n",
902
+ " # print(value[1])\n",
903
+ " if value[1] == True:\n",
904
+ " # print(idx, i, end='\\n')\n",
905
+ " where_chose_pairmate.append(idx)\n",
906
+ " break\n",
907
+ "\n",
908
+ " # where_chose_pairmate # trials where the pairmate was chosen ahead of the target"
909
+ ]
910
+ },
911
+ {
912
+ "cell_type": "code",
913
+ "execution_count": 58,
914
+ "id": "a5c4405c-b3b7-42fe-b622-f4cb81500464",
915
+ "metadata": {
916
+ "tags": []
917
+ },
918
+ "outputs": [],
919
+ "source": [
920
+ "# top-n predictions using CLIP brain embeddings\n",
921
+ "\n",
922
+ "if plot_all:\n",
923
+ " use_fwd_sim = True\n",
924
+ " top_n = 10 # how many of the top n images to display\n",
925
+ " print(\"Given Brain embedding, find correct Image embedding\")\n",
926
+ " fig, ax = plt.subplots(nrows=len(all_unique_images), ncols=top_n+1, figsize=(top_n*2,len(all_unique_images)*2))\n",
927
+ " for trial in range(len(all_unique_images)):\n",
928
+ " ax[trial, 0].imshow(utils.torch_to_Image(all_unique_images[trial]))\n",
929
+ " ax[trial, 0].set_title(\"original\\nimage\")\n",
930
+ " ax[trial, 0].axis(\"off\")\n",
931
+ " for attempt in range(top_n):\n",
932
+ " if trial < 50:\n",
933
+ " if \"ses-0\" not in model_name:\n",
934
+ " sim_half = fwd_sim_halves[0] if use_fwd_sim else bwd_sim_halves[0]\n",
935
+ " unique_imgs_to_plot = all_unique_images[:int(len(all_unique_images)/2)]\n",
936
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels[:int(len(all_unique_clipvoxels)/2)]\n",
937
+ " else:\n",
938
+ " sim_half = all_fwd_sim if use_fwd_sim else all_bwd_sim\n",
939
+ " unique_imgs_to_plot = all_unique_images\n",
940
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels\n",
941
+ " which = np.flip(np.argsort(sim_half[trial]))[attempt]\n",
942
+ "\n",
943
+ " elif trial >= 50:\n",
944
+ " if \"ses-0\" not in model_name:\n",
945
+ " sim_halves = fwd_sim_halves[1] if use_fwd_sim else bwd_sim_halves[1]\n",
946
+ " unique_imgs_to_plot = all_unique_images[int(len(all_unique_images)/2):]\n",
947
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels[int(len(all_unique_clipvoxels)/2):]\n",
948
+ " else:\n",
949
+ " sim_halves = all_fwd_sim if use_fwd_sim else all_bwd_sim\n",
950
+ " unique_imgs_to_plot = all_unique_images\n",
951
+ " # unique_clipvoxels_to_plot = all_unique_clipvoxels\n",
952
+ " which = np.flip(np.argsort(sim_half[trial-50]))[attempt]\n",
953
+ "\n",
954
+ " ax[trial, attempt+1].imshow(utils.torch_to_Image(unique_imgs_to_plot[which]))\n",
955
+ " ax[trial, attempt+1].set_title(f\"Top {attempt+1}\")\n",
956
+ " ax[trial, attempt+1].axis(\"off\")\n",
957
+ " fig.tight_layout()\n",
958
+ " # plt.savefig('figures/retrieval_top10')\n",
959
+ " plt.show()"
960
+ ]
961
+ },
962
+ {
963
+ "cell_type": "code",
964
+ "execution_count": 59,
965
+ "id": "05df72ce-3cdb-4ad2-a790-0835a41fb0f6",
966
+ "metadata": {
967
+ "tags": []
968
+ },
969
+ "outputs": [],
970
+ "source": [
971
+ "# similarity of each unique MST image to all others using CLIP image embeddings only (top-1 is guaranteed to be correct)\n",
972
+ "# uses last hidden layer (which may not match as well as the last layer to human semantic judgments)\n",
973
+ "if plot_all and compute_circular:\n",
974
+ " print(\"Given Brain embedding, find correct Image embedding\")\n",
975
+ " top_n = 10 # how many of the top n images to display\n",
976
+ " fig, ax = plt.subplots(nrows=len(all_unique_images), ncols=top_n+1, figsize=(top_n*2,len(all_unique_images)*2))\n",
977
+ " for trial in range(len(all_unique_images)):\n",
978
+ " ax[trial, 0].imshow(utils.torch_to_Image(all_unique_images[trial]))\n",
979
+ " ax[trial, 0].set_title(\"original\\nimage\")\n",
980
+ " ax[trial, 0].axis(\"off\")\n",
981
+ " for attempt in range(10):\n",
982
+ " if trial < 50:\n",
983
+ " if \"ses-0\" not in model_name:\n",
984
+ " sim_half_circular = fwd_sim_halves_circular[0]\n",
985
+ " unique_imgs_to_plot_circular = all_unique_images[:int(len(all_unique_images)/2)]\n",
986
+ " else:\n",
987
+ " sim_half_circular = fwd_sim_circular\n",
988
+ " unique_imgs_to_plot_circular = all_unique_images\n",
989
+ " which_circular = np.flip(np.argsort(sim_half_circular[trial]))[attempt]\n",
990
+ "\n",
991
+ " elif trial >= 50:\n",
992
+ " if \"ses-0\" not in model_name:\n",
993
+ " sim_halves_circular = fwd_sim_halves_circular[1]\n",
994
+ " unique_imgs_to_plot_circular = all_unique_images[int(len(all_unique_images)/2):]\n",
995
+ " else:\n",
996
+ " sim_halves_circular = all_fwd_sim_circular\n",
997
+ " unique_imgs_to_plot_circular = all_unique_images\n",
998
+ " which_circular = np.flip(np.argsort(sim_half_circular[trial-50]))[attempt]\n",
999
+ "\n",
1000
+ " ax[trial, attempt+1].imshow(utils.torch_to_Image(unique_imgs_to_plot_circular[which_circular]))\n",
1001
+ " ax[trial, attempt+1].set_title(f\"Top {attempt+1}\")\n",
1002
+ " ax[trial, attempt+1].axis(\"off\")\n",
1003
+ " fig.tight_layout()\n",
1004
+ " # plt.savefig('figures/circular_top10')\n",
1005
+ " plt.show()"
1006
+ ]
1007
+ },
1008
+ {
1009
+ "cell_type": "markdown",
1010
+ "id": "0d404dec-5336-45cf-8932-833e895a9ebe",
1011
+ "metadata": {
1012
+ "tags": []
1013
+ },
1014
+ "source": [
1015
+ "## MST Paired Retrieval (chance = 50%)"
1016
+ ]
1017
+ },
1018
+ {
1019
+ "cell_type": "code",
1020
+ "execution_count": 60,
1021
+ "id": "06667f7d-a356-4c30-8e1b-06ebd7ff1752",
1022
+ "metadata": {},
1023
+ "outputs": [],
1024
+ "source": [
1025
+ "if compute_circular:\n",
1026
+ " all_top_sims_circular = [] # len(fwd_sim_halves[0]); for each image, contains the similarity to the top-n choices until it gets the correct answer. If len 1, top-1 is correct\n",
1027
+ " all_pairmate_sims_circular = [] # len(fwd_sim_halves[0]); the similarity of each image to its pairmate \n",
1028
+ " all_chose_lures_circular = [] # len(fwd_sim_halves[0]); True for each top-n choice if the lure was predicted to be more similar to the target \n",
1029
+ "\n",
1030
+ " if \"ses-0\" not in model_name:\n",
1031
+ " first_half = True\n",
1032
+ " sim_halves_circular = fwd_sim_halves_circular\n",
1033
+ " sim_halves_circular = sim_halves_circular[0] if use_first_half else sim_halves_circular[1]\n",
1034
+ " else:\n",
1035
+ " sim_halves_circular = all_fwd_sim_circular\n",
1036
+ " for i, img in enumerate(sim_halves_circular):\n",
1037
+ " if i%2==0:\n",
1038
+ " idx_to_pairmate = 1\n",
1039
+ " elif i%2==1:\n",
1040
+ " idx_to_pairmate = -1\n",
1041
+ "\n",
1042
+ " order_circular = img.argsort()[::-1]\n",
1043
+ " # print(order)\n",
1044
+ " top_sim_circular = []\n",
1045
+ " chose_lure_circular = []\n",
1046
+ " for idx in order_circular:\n",
1047
+ " sim_circular = img[idx]\n",
1048
+ " pairmate_sim_circular = img[i+idx_to_pairmate]\n",
1049
+ " top_sim_circular.append(sim_circular) \n",
1050
+ " chose_lure_circular.append((idx, sim_circular <= pairmate_sim_circular))\n",
1051
+ " # print(i, idx, img[idx], img[i+idx_to_pairmate])\n",
1052
+ " if idx == i:\n",
1053
+ " break\n",
1054
+ "\n",
1055
+ " all_top_sims_circular.append(top_sim_circular)\n",
1056
+ " all_pairmate_sims_circular.append(pairmate_sim_circular)\n",
1057
+ " all_chose_lures_circular.append(chose_lure_circular)\n",
1058
+ "\n",
1059
+ " bot_half = (all_pairmate_sims_circular < np.median(all_pairmate_sims_circular))[::2] # every other one since the sims are symmetric\n",
1060
+ " top_half = (all_pairmate_sims_circular > np.median(all_pairmate_sims_circular))[::2]\n",
1061
+ "\n",
1062
+ " binary_acc = []\n",
1063
+ " for i,(a,b) in enumerate(tqdm(MST_pairmate_indices,total=len(MST_pairmate_indices))):\n",
1064
+ " # print(i,a,b)\n",
1065
+ " with torch.no_grad():\n",
1066
+ " with torch.cuda.amp.autocast():\n",
1067
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1068
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1069
+ " emb_v = nn.functional.normalize(all_clipvoxels[[a]].flatten(1),dim=-1).to(device)\n",
1070
+ "\n",
1071
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1072
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1073
+ "\n",
1074
+ " binary_acc.append(a_sim > b_sim)\n",
1075
+ "\n",
1076
+ " with torch.no_grad():\n",
1077
+ " with torch.cuda.amp.autocast():\n",
1078
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images[[a]].to(device)).float().flatten(1),dim=-1)\n",
1079
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images[[b]].to(device)).float().flatten(1),dim=-1)\n",
1080
+ " emb_v = nn.functional.normalize(all_clipvoxels[[b]].flatten(1),dim=-1).to(device)\n",
1081
+ "\n",
1082
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1083
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1084
+ "\n",
1085
+ " binary_acc.append(a_sim < b_sim)\n",
1086
+ "\n",
1087
+ " assert len(binary_acc) == 50\n",
1088
+ " mst_score = np.mean(binary_acc)\n",
1089
+ " print(f\"session score: {np.mean(binary_acc):.4f} ± {np.std(binary_acc):.4f}\")\n"
1090
+ ]
1091
+ },
1092
+ {
1093
+ "cell_type": "code",
1094
+ "execution_count": 61,
1095
+ "id": "3468021c-660d-4205-8e5d-d75f6f3c2881",
1096
+ "metadata": {},
1097
+ "outputs": [],
1098
+ "source": [
1099
+ "# test = np.sort(MST_pairmate_indices, axis=1)\n",
1100
+ "# test"
1101
+ ]
1102
+ },
1103
+ {
1104
+ "cell_type": "code",
1105
+ "execution_count": 62,
1106
+ "id": "a032c4cc-4cf9-4b33-ac02-0e569f7757be",
1107
+ "metadata": {},
1108
+ "outputs": [],
1109
+ "source": [
1110
+ "# model_name"
1111
+ ]
1112
+ },
1113
+ {
1114
+ "cell_type": "code",
1115
+ "execution_count": 63,
1116
+ "id": "130bb4d9-b7f1-40a4-a3e2-7e3699c69c49",
1117
+ "metadata": {},
1118
+ "outputs": [],
1119
+ "source": [
1120
+ "# paul_all_images = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_images.pt\").to('cpu')\n",
1121
+ "# paul_all_clipvoxels = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_clipvoxels.pt\").to('cpu')\n",
1122
+ "# paul_all_recons = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_recons.pt\").to('cpu')\n",
1123
+ "# paul_all_prior_out = torch.load(f\"evals/sub-001_ses-01_bs24_MST_paul_MSTsplit/sub-001_ses-01_bs24_MST_paul_MSTsplit_all_prior_out.pt\").to('cpu')\n",
1124
+ "# print(paul_all_images.shape, all_images.shape)\n",
1125
+ "# # assert torch.eq(paul_all_images, all_images)"
1126
+ ]
1127
+ },
1128
+ {
1129
+ "cell_type": "code",
1130
+ "execution_count": 64,
1131
+ "id": "698ffc2c-5653-49d3-9d16-38a32f9e7f6b",
1132
+ "metadata": {
1133
+ "tags": []
1134
+ },
1135
+ "outputs": [],
1136
+ "source": [
1137
+ "# print(paul_all_images.shape, all_images.shape)\n",
1138
+ "# torch.eq(paul_all_images, all_images)"
1139
+ ]
1140
+ },
1141
+ {
1142
+ "cell_type": "code",
1143
+ "execution_count": 65,
1144
+ "id": "23f26846-3170-4c91-a8e3-f98322e90dc0",
1145
+ "metadata": {
1146
+ "tags": []
1147
+ },
1148
+ "outputs": [
1149
+ {
1150
+ "name": "stdout",
1151
+ "output_type": "stream",
1152
+ "text": [
1153
+ "assuming ses-02+ses-03 multisession model\n"
1154
+ ]
1155
+ },
1156
+ {
1157
+ "name": "stderr",
1158
+ "output_type": "stream",
1159
+ "text": [
1160
+ "100%|██████████| 15/15 [00:03<00:00, 4.28it/s]\n"
1161
+ ]
1162
+ },
1163
+ {
1164
+ "ename": "AssertionError",
1165
+ "evalue": "",
1166
+ "output_type": "error",
1167
+ "traceback": [
1168
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
1169
+ "\u001b[31mAssertionError\u001b[39m Traceback (most recent call last)",
1170
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[65]\u001b[39m\u001b[32m, line 36\u001b[39m\n\u001b[32m 32\u001b[39m b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n\u001b[32m 34\u001b[39m binary_acc.append(a_sim < b_sim)\n\u001b[32m---> \u001b[39m\u001b[32m36\u001b[39m \u001b[38;5;28;01massert\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(binary_acc) == \u001b[32m50\u001b[39m \u001b[38;5;66;03m# don't want to average across both sessions; make sure it resets\u001b[39;00m\n\u001b[32m 37\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mses-0\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mhalf+\u001b[32m2\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m score: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnp.mean(binary_acc)\u001b[38;5;132;01m:\u001b[39;00m\u001b[33m.4f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m ± \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnp.std(binary_acc)\u001b[38;5;132;01m:\u001b[39;00m\u001b[33m.4f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 38\u001b[39m mst_score.append((np.mean(binary_acc),np.std(binary_acc)))\n",
1171
+ "\u001b[31mAssertionError\u001b[39m: "
1172
+ ]
1173
+ }
1174
+ ],
1175
+ "source": [
1176
+ "all_images_resized = transforms.Resize((224,224))(all_images).float()\n",
1177
+ "if \"MST\" in model_name:\n",
1178
+ " if \"ses-0\" not in model_name:\n",
1179
+ " print('assuming ses-02+ses-03 multisession model')\n",
1180
+ " mst_score = []\n",
1181
+ " for half in range(2):\n",
1182
+ " binary_acc = []\n",
1183
+ " if half==0:\n",
1184
+ " MST_pairmate_indices_half = MST_pairmate_indices[:int(len(MST_pairmate_indices)/2)]\n",
1185
+ " elif half==1:\n",
1186
+ " MST_pairmate_indices_half = MST_pairmate_indices[int(len(MST_pairmate_indices)/2):]\n",
1187
+ " for i,(a,b) in enumerate(tqdm(MST_pairmate_indices_half,total=len(MST_pairmate_indices_half))):\n",
1188
+ " # print(i,a,b)\n",
1189
+ " with torch.no_grad():\n",
1190
+ " with torch.cuda.amp.autocast():\n",
1191
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images_resized[[a]].to(device)).float().flatten(1),dim=-1)\n",
1192
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images_resized[[b]].to(device)).float().flatten(1),dim=-1)\n",
1193
+ " emb_v = nn.functional.normalize(all_clipvoxels[[a]].flatten(1),dim=-1).to(device)\n",
1194
+ "\n",
1195
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1196
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1197
+ "\n",
1198
+ " binary_acc.append(a_sim > b_sim)\n",
1199
+ "\n",
1200
+ " with torch.no_grad():\n",
1201
+ " with torch.cuda.amp.autocast():\n",
1202
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images_resized[[a]].to(device)).float().flatten(1),dim=-1)\n",
1203
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images_resized[[b]].to(device)).float().flatten(1),dim=-1)\n",
1204
+ " emb_v = nn.functional.normalize(all_clipvoxels[[b]].flatten(1),dim=-1).to(device)\n",
1205
+ "\n",
1206
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1207
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1208
+ "\n",
1209
+ " binary_acc.append(a_sim < b_sim)\n",
1210
+ "\n",
1211
+ " assert len(binary_acc) == 50 # don't want to average across both sessions; make sure it resets\n",
1212
+ " print(f\"ses-0{half+2} score: {np.mean(binary_acc):.4f} ± {np.std(binary_acc):.4f}\")\n",
1213
+ " mst_score.append((np.mean(binary_acc),np.std(binary_acc)))\n",
1214
+ "\n",
1215
+ " # print(mst_score)\n",
1216
+ " else:\n",
1217
+ " print('assuming single session')\n",
1218
+ " binary_acc = []\n",
1219
+ " for i,(a,b) in enumerate(tqdm(MST_pairmate_indices,total=len(MST_pairmate_indices))):\n",
1220
+ " # print(i,a,b)\n",
1221
+ " with torch.no_grad():\n",
1222
+ " with torch.cuda.amp.autocast():\n",
1223
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images_resized[[a]].to(device)).float().flatten(1),dim=-1)\n",
1224
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images_resized[[b]].to(device)).float().flatten(1),dim=-1)\n",
1225
+ " emb_v = nn.functional.normalize(all_clipvoxels[[a]].flatten(1),dim=-1).to(device)\n",
1226
+ "\n",
1227
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1228
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1229
+ "\n",
1230
+ " binary_acc.append(a_sim > b_sim)\n",
1231
+ "\n",
1232
+ " with torch.no_grad():\n",
1233
+ " with torch.cuda.amp.autocast():\n",
1234
+ " emb_a = nn.functional.normalize(clip_img_embedder(all_images_resized[[a]].to(device)).float().flatten(1),dim=-1)\n",
1235
+ " emb_b = nn.functional.normalize(clip_img_embedder(all_images_resized[[b]].to(device)).float().flatten(1),dim=-1)\n",
1236
+ " emb_v = nn.functional.normalize(all_clipvoxels[[b]].flatten(1),dim=-1).to(device)\n",
1237
+ "\n",
1238
+ " a_sim = utils.pairwise_cosine_similarity(emb_v, emb_a).item()\n",
1239
+ " b_sim = utils.pairwise_cosine_similarity(emb_v, emb_b).item()\n",
1240
+ "\n",
1241
+ " binary_acc.append(a_sim < b_sim)\n",
1242
+ " \n",
1243
+ " # assert len(binary_acc) == 50\n",
1244
+ " mst_score = np.mean(binary_acc)\n",
1245
+ " print(f\"session score: {np.mean(binary_acc):.4f} ± {np.std(binary_acc):.4f}\")\n"
1246
+ ]
1247
+ },
1248
+ {
1249
+ "cell_type": "markdown",
1250
+ "id": "0a26e124-2444-434d-a399-d03c2c90cc08",
1251
+ "metadata": {},
1252
+ "source": [
1253
+ "## 2-way identification"
1254
+ ]
1255
+ },
1256
+ {
1257
+ "cell_type": "code",
1258
+ "execution_count": 66,
1259
+ "id": "3e1778ff-5d6a-4087-b59f-0f44b9e0eada",
1260
+ "metadata": {},
1261
+ "outputs": [],
1262
+ "source": [
1263
+ "from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names\n",
1264
+ "\n",
1265
+ "@torch.no_grad()\n",
1266
+ "def two_way_identification(all_recons, all_images, model, preprocess, feature_layer=None, return_avg=True):\n",
1267
+ " preds = model(torch.stack([preprocess(recon) for recon in all_recons], dim=0).to(device))\n",
1268
+ " reals = model(torch.stack([preprocess(indiv) for indiv in all_images], dim=0).to(device))\n",
1269
+ " if feature_layer is None:\n",
1270
+ " preds = preds.float().flatten(1).cpu().numpy()\n",
1271
+ " reals = reals.float().flatten(1).cpu().numpy()\n",
1272
+ " else:\n",
1273
+ " preds = preds[feature_layer].float().flatten(1).cpu().numpy()\n",
1274
+ " reals = reals[feature_layer].float().flatten(1).cpu().numpy()\n",
1275
+ "\n",
1276
+ " r = np.corrcoef(reals, preds)\n",
1277
+ " r = r[:len(all_images), len(all_images):]\n",
1278
+ " congruents = np.diag(r)\n",
1279
+ "\n",
1280
+ " success = r < congruents\n",
1281
+ " success_cnt = np.sum(success, 0)\n",
1282
+ "\n",
1283
+ " if return_avg:\n",
1284
+ " perf = np.mean(success_cnt) / (len(all_images)-1)\n",
1285
+ " return perf\n",
1286
+ " else:\n",
1287
+ " return success_cnt, len(all_images)-1\n",
1288
+ " \n",
1289
+ "all_recons=all_recons.to(device)\n",
1290
+ "all_images=all_images.to(device)"
1291
+ ]
1292
+ },
1293
+ {
1294
+ "cell_type": "markdown",
1295
+ "id": "df6be966-52ef-4cf6-8078-8d2d9617564b",
1296
+ "metadata": {},
1297
+ "source": [
1298
+ "## PixCorr"
1299
+ ]
1300
+ },
1301
+ {
1302
+ "cell_type": "code",
1303
+ "execution_count": 67,
1304
+ "id": "2e17ea38-a254-4e90-a910-711734fdd8eb",
1305
+ "metadata": {},
1306
+ "outputs": [
1307
+ {
1308
+ "name": "stdout",
1309
+ "output_type": "stream",
1310
+ "text": [
1311
+ "torch.Size([248, 541875])\n",
1312
+ "torch.Size([248, 541875])\n"
1313
+ ]
1314
+ },
1315
+ {
1316
+ "name": "stderr",
1317
+ "output_type": "stream",
1318
+ "text": [
1319
+ "100%|██████████| 248/248 [00:01<00:00, 133.19it/s]"
1320
+ ]
1321
+ },
1322
+ {
1323
+ "name": "stdout",
1324
+ "output_type": "stream",
1325
+ "text": [
1326
+ "0.05185274977219404\n"
1327
+ ]
1328
+ },
1329
+ {
1330
+ "name": "stderr",
1331
+ "output_type": "stream",
1332
+ "text": [
1333
+ "\n"
1334
+ ]
1335
+ }
1336
+ ],
1337
+ "source": [
1338
+ "preprocess = transforms.Compose([\n",
1339
+ " transforms.Resize(425, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1340
+ "])\n",
1341
+ "\n",
1342
+ "# Flatten images while keeping the batch dimension\n",
1343
+ "all_images_flattened = preprocess(all_images).reshape(len(all_images), -1).cpu()\n",
1344
+ "all_recons_flattened = preprocess(all_recons).view(len(all_recons), -1).cpu()\n",
1345
+ "\n",
1346
+ "print(all_images_flattened.shape)\n",
1347
+ "print(all_recons_flattened.shape)\n",
1348
+ "\n",
1349
+ "corr_stack = []\n",
1350
+ "\n",
1351
+ "corrsum = 0\n",
1352
+ "for i in tqdm(range(len(all_images))):\n",
1353
+ " corrcoef = np.corrcoef(all_images_flattened[i], all_recons_flattened[i])[0][1]\n",
1354
+ " if np.isnan(corrcoef):\n",
1355
+ " print(\"WARNING: CORRCOEF WAS NAN\")\n",
1356
+ " corrcoef = 0\n",
1357
+ " corrsum += corrcoef\n",
1358
+ " corr_stack.append(corrcoef)\n",
1359
+ "corrmean = corrsum / len(all_images)\n",
1360
+ "\n",
1361
+ "pixcorr = corrmean\n",
1362
+ "print(pixcorr)"
1363
+ ]
1364
+ },
1365
+ {
1366
+ "cell_type": "code",
1367
+ "execution_count": 68,
1368
+ "id": "7e2cd891-db44-475d-a8c6-ab345eaa58f8",
1369
+ "metadata": {},
1370
+ "outputs": [],
1371
+ "source": [
1372
+ "# print(all_images.shape)\n",
1373
+ "# print(all_images_flattened.shape)\n",
1374
+ "# print(all_recons.shape)\n",
1375
+ "# print(all_recons_flattened.shape)\n",
1376
+ "# len(all_images)"
1377
+ ]
1378
+ },
1379
+ {
1380
+ "cell_type": "markdown",
1381
+ "id": "7a556d5b-33a2-44aa-b48d-4b168316bbdd",
1382
+ "metadata": {
1383
+ "tags": []
1384
+ },
1385
+ "source": [
1386
+ "## SSIM"
1387
+ ]
1388
+ },
1389
+ {
1390
+ "cell_type": "code",
1391
+ "execution_count": 69,
1392
+ "id": "2326fc4c-1248-4d0f-9176-218c6460f285",
1393
+ "metadata": {},
1394
+ "outputs": [
1395
+ {
1396
+ "name": "stdout",
1397
+ "output_type": "stream",
1398
+ "text": [
1399
+ "converted, now calculating ssim...\n"
1400
+ ]
1401
+ },
1402
+ {
1403
+ "name": "stderr",
1404
+ "output_type": "stream",
1405
+ "text": [
1406
+ "100%|██████████| 248/248 [00:03<00:00, 69.48it/s]"
1407
+ ]
1408
+ },
1409
+ {
1410
+ "name": "stdout",
1411
+ "output_type": "stream",
1412
+ "text": [
1413
+ "0.4141594942526016\n"
1414
+ ]
1415
+ },
1416
+ {
1417
+ "name": "stderr",
1418
+ "output_type": "stream",
1419
+ "text": [
1420
+ "\n"
1421
+ ]
1422
+ }
1423
+ ],
1424
+ "source": [
1425
+ "# see https://github.com/zijin-gu/meshconv-decoding/issues/3\n",
1426
+ "from skimage.color import rgb2gray\n",
1427
+ "from skimage.metrics import structural_similarity as ssim\n",
1428
+ "\n",
1429
+ "preprocess = transforms.Compose([\n",
1430
+ " transforms.Resize(425, interpolation=transforms.InterpolationMode.BILINEAR), \n",
1431
+ "])\n",
1432
+ "\n",
1433
+ "# convert image to grayscale with rgb2grey\n",
1434
+ "img_gray = rgb2gray(preprocess(all_images).permute((0,2,3,1)).cpu())\n",
1435
+ "recon_gray = rgb2gray(preprocess(all_recons).permute((0,2,3,1)).cpu())\n",
1436
+ "print(\"converted, now calculating ssim...\")\n",
1437
+ "\n",
1438
+ "ssim_score=[]\n",
1439
+ "for im,rec in tqdm(zip(img_gray,recon_gray),total=len(all_images)):\n",
1440
+ " ssim_score.append(ssim(rec, im, multichannel=True, gaussian_weights=True, sigma=1.5, use_sample_covariance=False, data_range=1.0))\n",
1441
+ "\n",
1442
+ "ssim = np.mean(ssim_score)\n",
1443
+ "print(ssim)"
1444
+ ]
1445
+ },
1446
+ {
1447
+ "cell_type": "markdown",
1448
+ "id": "35138520-ec00-48a6-90dc-249a32a783d2",
1449
+ "metadata": {},
1450
+ "source": [
1451
+ "## AlexNet"
1452
+ ]
1453
+ },
1454
+ {
1455
+ "cell_type": "code",
1456
+ "execution_count": 70,
1457
+ "id": "3b45cc6c-ab80-43e2-b446-c8fcb4fc54e4",
1458
+ "metadata": {},
1459
+ "outputs": [
1460
+ {
1461
+ "name": "stdout",
1462
+ "output_type": "stream",
1463
+ "text": [
1464
+ "\n",
1465
+ "---early, AlexNet(2)---\n",
1466
+ "2-way Percent Correct: 0.5106\n",
1467
+ "\n",
1468
+ "---mid, AlexNet(5)---\n",
1469
+ "2-way Percent Correct: 0.4975\n"
1470
+ ]
1471
+ }
1472
+ ],
1473
+ "source": [
1474
+ "from torchvision.models import alexnet, AlexNet_Weights\n",
1475
+ "alex_weights = AlexNet_Weights.IMAGENET1K_V1\n",
1476
+ "\n",
1477
+ "alex_model = create_feature_extractor(alexnet(weights=alex_weights), return_nodes=['features.4','features.11']).to(device)\n",
1478
+ "alex_model.eval().requires_grad_(False).to(device)\n",
1479
+ "\n",
1480
+ "# see alex_weights.transforms()\n",
1481
+ "preprocess = transforms.Compose([\n",
1482
+ " transforms.Resize(256, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1483
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1484
+ " std=[0.229, 0.224, 0.225]),\n",
1485
+ "])\n",
1486
+ "\n",
1487
+ "layer = 'early, AlexNet(2)'\n",
1488
+ "print(f\"\\n---{layer}---\")\n",
1489
+ "all_per_correct = two_way_identification(all_recons, all_images, \n",
1490
+ " alex_model, preprocess, 'features.4')\n",
1491
+ "alexnet2 = np.mean(all_per_correct)\n",
1492
+ "print(f\"2-way Percent Correct: {alexnet2:.4f}\")\n",
1493
+ "\n",
1494
+ "layer = 'mid, AlexNet(5)'\n",
1495
+ "print(f\"\\n---{layer}---\")\n",
1496
+ "all_per_correct = two_way_identification(all_recons, all_images, \n",
1497
+ " alex_model, preprocess, 'features.11')\n",
1498
+ "alexnet5 = np.mean(all_per_correct)\n",
1499
+ "print(f\"2-way Percent Correct: {alexnet5:.4f}\")"
1500
+ ]
1501
+ },
1502
+ {
1503
+ "cell_type": "markdown",
1504
+ "id": "c296bab2-d106-469e-b997-b32d21a2cf01",
1505
+ "metadata": {},
1506
+ "source": [
1507
+ "## InceptionV3"
1508
+ ]
1509
+ },
1510
+ {
1511
+ "cell_type": "code",
1512
+ "execution_count": 71,
1513
+ "id": "5a9c1b2b-af2a-476d-a1ac-32ee915ac2ec",
1514
+ "metadata": {},
1515
+ "outputs": [
1516
+ {
1517
+ "name": "stderr",
1518
+ "output_type": "stream",
1519
+ "text": [
1520
+ "/home/ubuntu/rt_mindEye2/lib/python3.11/site-packages/torchvision/models/feature_extraction.py:174: UserWarning: NOTE: The nodes obtained by tracing the model in eval mode are a subsequence of those obtained in train mode. When choosing nodes for feature extraction, you may need to specify output nodes for train and eval mode separately.\n",
1521
+ " warnings.warn(msg + suggestion_msg)\n"
1522
+ ]
1523
+ },
1524
+ {
1525
+ "name": "stdout",
1526
+ "output_type": "stream",
1527
+ "text": [
1528
+ "2-way Percent Correct: 0.5077\n"
1529
+ ]
1530
+ }
1531
+ ],
1532
+ "source": [
1533
+ "from torchvision.models import inception_v3, Inception_V3_Weights\n",
1534
+ "weights = Inception_V3_Weights.DEFAULT\n",
1535
+ "inception_model = create_feature_extractor(inception_v3(weights=weights), \n",
1536
+ " return_nodes=['avgpool']).to(device)\n",
1537
+ "inception_model.eval().requires_grad_(False).to(device)\n",
1538
+ "\n",
1539
+ "# see weights.transforms()\n",
1540
+ "preprocess = transforms.Compose([\n",
1541
+ " transforms.Resize(342, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1542
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1543
+ " std=[0.229, 0.224, 0.225]),\n",
1544
+ "])\n",
1545
+ "\n",
1546
+ "all_per_correct = two_way_identification(all_recons, all_images,\n",
1547
+ " inception_model, preprocess, 'avgpool')\n",
1548
+ " \n",
1549
+ "inception = np.mean(all_per_correct)\n",
1550
+ "print(f\"2-way Percent Correct: {inception:.4f}\")"
1551
+ ]
1552
+ },
1553
+ {
1554
+ "cell_type": "markdown",
1555
+ "id": "d7a25f7f-8298-4413-b512-8a1173413e07",
1556
+ "metadata": {},
1557
+ "source": [
1558
+ "## CLIP"
1559
+ ]
1560
+ },
1561
+ {
1562
+ "cell_type": "code",
1563
+ "execution_count": 72,
1564
+ "id": "6afbf7ce-8793-4988-a328-a632acd88aa9",
1565
+ "metadata": {},
1566
+ "outputs": [
1567
+ {
1568
+ "name": "stdout",
1569
+ "output_type": "stream",
1570
+ "text": [
1571
+ "2-way Percent Correct: 0.5193\n"
1572
+ ]
1573
+ }
1574
+ ],
1575
+ "source": [
1576
+ "import clip\n",
1577
+ "clip_model, preprocess = clip.load(\"ViT-L/14\", device=device)\n",
1578
+ "\n",
1579
+ "preprocess = transforms.Compose([\n",
1580
+ " transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1581
+ " transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n",
1582
+ " std=[0.26862954, 0.26130258, 0.27577711]),\n",
1583
+ "])\n",
1584
+ "\n",
1585
+ "all_per_correct = two_way_identification(all_recons, all_images,\n",
1586
+ " clip_model.encode_image, preprocess, None) # final layer\n",
1587
+ "clip_ = np.mean(all_per_correct)\n",
1588
+ "print(f\"2-way Percent Correct: {clip_:.4f}\")"
1589
+ ]
1590
+ },
1591
+ {
1592
+ "cell_type": "markdown",
1593
+ "id": "e4fed9f8-ef1a-4c6d-a83f-2a934b6e87fd",
1594
+ "metadata": {},
1595
+ "source": [
1596
+ "## Efficient Net"
1597
+ ]
1598
+ },
1599
+ {
1600
+ "cell_type": "code",
1601
+ "execution_count": 73,
1602
+ "id": "14143c0f-1b32-43ef-98d8-8ed458df4551",
1603
+ "metadata": {},
1604
+ "outputs": [
1605
+ {
1606
+ "name": "stdout",
1607
+ "output_type": "stream",
1608
+ "text": [
1609
+ "Distance: 0.9543885725097773\n"
1610
+ ]
1611
+ }
1612
+ ],
1613
+ "source": [
1614
+ "import scipy as sp\n",
1615
+ "from torchvision.models import efficientnet_b1, EfficientNet_B1_Weights\n",
1616
+ "weights = EfficientNet_B1_Weights.DEFAULT\n",
1617
+ "eff_model = create_feature_extractor(efficientnet_b1(weights=weights), \n",
1618
+ " return_nodes=['avgpool'])\n",
1619
+ "eff_model.eval().requires_grad_(False).to(device)\n",
1620
+ "\n",
1621
+ "# see weights.transforms()\n",
1622
+ "preprocess = transforms.Compose([\n",
1623
+ " transforms.Resize(255, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1624
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1625
+ " std=[0.229, 0.224, 0.225]),\n",
1626
+ "])\n",
1627
+ "\n",
1628
+ "gt = eff_model(preprocess(all_images))['avgpool']\n",
1629
+ "gt = gt.reshape(len(gt),-1).cpu().numpy()\n",
1630
+ "fake = eff_model(preprocess(all_recons))['avgpool']\n",
1631
+ "fake = fake.reshape(len(fake),-1).cpu().numpy()\n",
1632
+ "\n",
1633
+ "effnet_nomean = np.array([sp.spatial.distance.correlation(gt[i],fake[i]) for i in range(len(gt))])\n",
1634
+ "effnet = effnet_nomean.mean()\n",
1635
+ "print(\"Distance:\",effnet)"
1636
+ ]
1637
+ },
1638
+ {
1639
+ "cell_type": "markdown",
1640
+ "id": "405f669d-cab7-4c75-90cd-651283f65a9e",
1641
+ "metadata": {},
1642
+ "source": [
1643
+ "## SwAV"
1644
+ ]
1645
+ },
1646
+ {
1647
+ "cell_type": "code",
1648
+ "execution_count": 74,
1649
+ "id": "4c60b0c4-79fe-4cff-95e9-99733c821e67",
1650
+ "metadata": {},
1651
+ "outputs": [
1652
+ {
1653
+ "name": "stderr",
1654
+ "output_type": "stream",
1655
+ "text": [
1656
+ "Using cache found in /home/ubuntu/.cache/torch/hub/facebookresearch_swav_main\n",
1657
+ "/home/ubuntu/rt_mindEye2/lib/python3.11/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
1658
+ " warnings.warn(\n",
1659
+ "/home/ubuntu/rt_mindEye2/lib/python3.11/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=None`.\n",
1660
+ " warnings.warn(msg)\n"
1661
+ ]
1662
+ },
1663
+ {
1664
+ "name": "stdout",
1665
+ "output_type": "stream",
1666
+ "text": [
1667
+ "Distance: 0.6520650061534471\n"
1668
+ ]
1669
+ }
1670
+ ],
1671
+ "source": [
1672
+ "swav_model = torch.hub.load('facebookresearch/swav:main', 'resnet50')\n",
1673
+ "swav_model = create_feature_extractor(swav_model, \n",
1674
+ " return_nodes=['avgpool'])\n",
1675
+ "swav_model.eval().requires_grad_(False).to(device)\n",
1676
+ "\n",
1677
+ "preprocess = transforms.Compose([\n",
1678
+ " transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1679
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
1680
+ " std=[0.229, 0.224, 0.225]),\n",
1681
+ "])\n",
1682
+ "\n",
1683
+ "gt = swav_model(preprocess(all_images))['avgpool']\n",
1684
+ "gt = gt.reshape(len(gt),-1).cpu().numpy()\n",
1685
+ "fake = swav_model(preprocess(all_recons))['avgpool']\n",
1686
+ "fake = fake.reshape(len(fake),-1).cpu().numpy()\n",
1687
+ "\n",
1688
+ "swav_nomean = np.array([sp.spatial.distance.correlation(gt[i],fake[i]) for i in range(len(gt))])\n",
1689
+ "swav = swav_nomean.mean()\n",
1690
+ "print(\"Distance:\",swav)"
1691
+ ]
1692
+ },
1693
+ {
1694
+ "cell_type": "code",
1695
+ "execution_count": 75,
1696
+ "id": "8ebeeff0-49ab-4bf5-9e12-dd45eb7ff71f",
1697
+ "metadata": {
1698
+ "tags": []
1699
+ },
1700
+ "outputs": [
1701
+ {
1702
+ "name": "stdout",
1703
+ "output_type": "stream",
1704
+ "text": [
1705
+ " Metric Value\n",
1706
+ " alexnet2 0.510644\n",
1707
+ " alexnet5 0.497519\n",
1708
+ " inception 0.507689\n",
1709
+ " clip_ 0.519329\n",
1710
+ " effnet 0.954389\n",
1711
+ " swav 0.652065\n",
1712
+ " pixcorr 0.051853\n",
1713
+ " ssim 0.414159\n",
1714
+ "percent_correct_fwd 0.048387\n",
1715
+ "percent_correct_bwd 0.016129\n",
1716
+ " mst_score []\n",
1717
+ "Saved final evals!\n"
1718
+ ]
1719
+ }
1720
+ ],
1721
+ "source": [
1722
+ "import pandas as pd\n",
1723
+ "\n",
1724
+ "# Define metric names\n",
1725
+ "metric_names = [\n",
1726
+ " \"alexnet2\", \"alexnet5\", \"inception\", \"clip_\", \"effnet\", \"swav\", \"pixcorr\", \"ssim\",\n",
1727
+ " \"percent_correct_fwd\", \"percent_correct_bwd\", \"mst_score\"\n",
1728
+ "]\n",
1729
+ "\n",
1730
+ "# Define values depending on session\n",
1731
+ "if \"ses-0\" not in model_name and False:\n",
1732
+ " values = [alexnet2, alexnet5, inception, clip_, effnet, swav, pixcorr, ssim, fwd_acc, bwd_acc, mst_score]\n",
1733
+ "else:\n",
1734
+ " values = [alexnet2, alexnet5, inception, clip_, effnet, swav, pixcorr, ssim, all_fwd_acc[0], all_bwd_acc[0], mst_score]\n",
1735
+ "\n",
1736
+ "# Create the DataFrame\n",
1737
+ "df = pd.DataFrame({\n",
1738
+ " \"Metric\": metric_names,\n",
1739
+ " \"Value\": values\n",
1740
+ "})\n",
1741
+ "\n",
1742
+ "# Optional formatting\n",
1743
+ "# pd.options.display.float_format = '{:.2%}'.format\n",
1744
+ "\n",
1745
+ "# Print the DataFrame nicely\n",
1746
+ "print(df.to_string(index=False))\n",
1747
+ "\n",
1748
+ "# Save to file if needed\n",
1749
+ "final_evals_path = f\"{eval_dir}/final_evals\"\n",
1750
+ "if saving:\n",
1751
+ " df.to_csv(final_evals_path, index=False)\n",
1752
+ " print('Saved final evals!')\n"
1753
+ ]
1754
+ },
1755
+ {
1756
+ "cell_type": "code",
1757
+ "execution_count": 76,
1758
+ "id": "8dd57b7a",
1759
+ "metadata": {},
1760
+ "outputs": [
1761
+ {
1762
+ "data": {
1763
+ "text/plain": [
1764
+ "torch.Size([62, 1, 1024])"
1765
+ ]
1766
+ },
1767
+ "execution_count": 76,
1768
+ "metadata": {},
1769
+ "output_type": "execute_result"
1770
+ }
1771
+ ],
1772
+ "source": [
1773
+ "all_unique_clipvoxels.shape"
1774
+ ]
1775
+ },
1776
+ {
1777
+ "cell_type": "code",
1778
+ "execution_count": 77,
1779
+ "id": "0446fb2a-fd3f-451f-b9b2-38aa95a2be8d",
1780
+ "metadata": {},
1781
+ "outputs": [
1782
+ {
1783
+ "name": "stdout",
1784
+ "output_type": "stream",
1785
+ "text": [
1786
+ "Metric,Value\n",
1787
+ "alexnet2,0.5106438552958077\n",
1788
+ "alexnet5,0.49751861042183626\n",
1789
+ "inception,0.5076890427060207\n",
1790
+ "clip_,0.519328718819381\n",
1791
+ "effnet,0.9543885725097773\n",
1792
+ "swav,0.6520650061534471\n",
1793
+ "pixcorr,0.05185274977219404\n",
1794
+ "ssim,0.4141594942526016\n",
1795
+ "percent_correct_fwd,0.04838709533214569\n",
1796
+ "percent_correct_bwd,0.016129031777381897\n",
1797
+ "mst_score,[]\n"
1798
+ ]
1799
+ }
1800
+ ],
1801
+ "source": [
1802
+ "with open(final_evals_path, 'r') as f:\n",
1803
+ " for line in f:\n",
1804
+ " print(line, end='')\n"
1805
+ ]
1806
+ },
1807
+ {
1808
+ "cell_type": "code",
1809
+ "execution_count": null,
1810
+ "id": "fd74e27c-6d79-4d49-bcc9-f6b85fadc752",
1811
+ "metadata": {},
1812
+ "outputs": [],
1813
+ "source": []
1814
+ }
1815
+ ],
1816
+ "metadata": {
1817
+ "kernelspec": {
1818
+ "display_name": "rt_mindEye2",
1819
+ "language": "python",
1820
+ "name": "python3"
1821
+ },
1822
+ "language_info": {
1823
+ "codemirror_mode": {
1824
+ "name": "ipython",
1825
+ "version": 3
1826
+ },
1827
+ "file_extension": ".py",
1828
+ "mimetype": "text/x-python",
1829
+ "name": "python",
1830
+ "nbconvert_exporter": "python",
1831
+ "pygments_lexer": "ipython3",
1832
+ "version": "3.11.13"
1833
+ }
1834
+ },
1835
+ "nbformat": 4,
1836
+ "nbformat_minor": 5
1837
+ }
main-finetune-rt-preproc.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
main-finetune-rt-preproc.py ADDED
@@ -0,0 +1,2260 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # #### This notebook fine-tunes MindEye on training sessions using real-time compatible preprocessing to produce a checkpoint that can be used in the real-time scan
5
+
6
+ # # Import packages & functions
7
+
8
+ # In[1]:
9
+
10
+
11
+ print("importing modules")
12
+ import os
13
+ import sys
14
+ import json
15
+ import argparse
16
+ import numpy as np
17
+ import time
18
+ import random
19
+ import string
20
+ import h5py
21
+ from tqdm import tqdm
22
+ import webdataset as wds
23
+ from PIL import Image
24
+ import pandas as pd
25
+ import nibabel as nib
26
+ import nilearn
27
+
28
+ import matplotlib.pyplot as plt
29
+ import torch
30
+ import torch.nn as nn
31
+ from torchvision import transforms
32
+
33
+ # tf32 data type is faster than standard float32
34
+ torch.backends.cuda.matmul.allow_tf32 = True
35
+
36
+ import utils
37
+ from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas
38
+
39
+ # imports utils from mindeye_preproc as "preproc"
40
+ import importlib.util
41
+ parent_utils_path = "/home/ri4541/mindeye_preproc/analysis/utils.py"
42
+ spec = importlib.util.spec_from_file_location("utils", parent_utils_path)
43
+ preproc = importlib.util.module_from_spec(spec)
44
+ parent_dir = os.path.dirname(parent_utils_path)
45
+ if parent_dir not in sys.path:
46
+ sys.path.append(parent_dir)
47
+ spec.loader.exec_module(preproc)
48
+
49
+ if utils.is_interactive():
50
+ from IPython.display import clear_output # function to clear print outputs in cell
51
+ get_ipython().run_line_magic('load_ext', 'autoreload')
52
+ # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions
53
+ get_ipython().run_line_magic('autoreload', '2')
54
+
55
+ seed = utils.get_slurm_seed()
56
+
57
+
58
+ # # Princeton data prep
59
+
60
+ # ## Load Data & Design
61
+
62
+ # In[2]:
63
+
64
+
65
+ if utils.is_interactive():
66
+ sub = "sub-005"
67
+ session = "all"
68
+ task = 'C' # 'study' or 'A'; used to search for functional run in bids format
69
+ func_task_name = 'C'
70
+ else:
71
+ sub = os.environ["SUB"]
72
+ session = os.environ["SESSION"]
73
+ task = os.environ["TASK"]
74
+ func_task_name = 'C'
75
+
76
+ if session == "all":
77
+ ses_list = ["ses-01", "ses-02"] # list of actual session IDs
78
+ design_ses_list = ["ses-01", "ses-02"] # list of session IDs to search for design matrix
79
+ else:
80
+ ses_list = [session]
81
+ design_ses_list = [session]
82
+
83
+ task_name = f"_task-{task}" if task != 'study' else ''
84
+ resample_voxel_size = False
85
+ resample_post_glmsingle = False # do you want to do voxel resampling here? if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal
86
+ load_from_resampled_file = False # do you want to load resampled data from file? if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data
87
+
88
+ train_test_split = 'MST' # 'MST', 'orig', 'unique'
89
+ remove_close_to_MST = False
90
+ remove_random_n = False
91
+
92
+ if remove_close_to_MST or remove_random_n:
93
+ assert remove_close_to_MST != remove_random_n # don't remove both sets of images
94
+
95
+ n_to_remove = 0
96
+ if remove_random_n:
97
+ assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set
98
+ n_to_remove = 150
99
+
100
+ if resample_voxel_size:
101
+ # voxel size was unchanged in glmsingle, want to perform resampling here
102
+ resampled_vox_size = 2.5
103
+ resample_method = "sinc" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html
104
+
105
+ # file name helper variables
106
+ vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore
107
+ resampled_suffix = f"resampled_{vox_dim_str}mm_{resample_method}"
108
+ mask_resampled_suffix = resampled_suffix
109
+ if resample_post_glmsingle:
110
+ resampled_suffix += '_postglmsingle'
111
+ else:
112
+ resampled_suffix += '_preglmsingle'
113
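+ # Worked example of the suffix construction above: with resampled_vox_size = 2.5,
+ # resample_method = "sinc", and resample_post_glmsingle = False, the result is
+ # resampled_suffix == "resampled_2_5mm_sinc_preglmsingle".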
+
114
+
115
+ # In[3]:
116
+
117
+
118
+ session_label = preproc.get_session_label(ses_list)
119
+ print('session label:', session_label)
120
+ n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)
121
+
122
+
123
+ # In[4]:
124
+
125
+
126
+ if utils.is_interactive():
127
+ glmsingle_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}"
128
+ else:
129
+ glmsingle_path = os.environ["glmsingle_path"]
130
+
131
+ designdir = "/home/ri4541/real_time_mindEye2"
132
+ print(glmsingle_path)
133
+
134
+ if resample_voxel_size:
135
+ # option 1: we are using original (non-resampled) GLMsingle outputs and doing the resampling here
136
+ # option 2: doing resampling pre-GLMsingle and using those outputs; no resampling involved here
137
+ if resample_post_glmsingle:
138
+ # option 1
139
+ orig_glmsingle_path = glmsingle_path
140
+ glmsingle_path += f"_{resampled_suffix}"
141
+ print("resampled glmsingle path:", glmsingle_path)
142
+ if load_from_resampled_file:
143
+ # resampling is already done; load from file
144
+ assert os.path.exists(glmsingle_path) # the new directory must have been created if we reached here
145
+ else:
146
+ # don't load from file; do resampling here
147
+ os.makedirs(glmsingle_path,exist_ok=True)
148
+ else:
149
+ # option 2
150
+ glmsingle_path += f"_{resampled_suffix}"
151
+ print("glmsingle path:", glmsingle_path)
152
+
153
+ assert os.path.exists(glmsingle_path)
154
+ print("glmsingle path exists!")
155
+
156
+
157
+ # In[5]:
158
+
159
+
160
+ data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files(
161
+ sub=sub,
162
+ session=session,
163
+ func_task_name=task,
164
+ designdir=designdir,
165
+ design_ses_list=design_ses_list
166
+ )
167
+
168
+ if sub == 'sub-001':
169
+ if session == 'ses-01':
170
+ assert image_names[0] == 'images/image_686_seed_1.png'
171
+ elif session in ('ses-02', 'all'):
172
+ assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'
173
+ elif session == 'ses-03':
174
+ assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'
175
+ elif session == 'ses-04':
176
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
177
+ elif sub == 'sub-003':
178
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
179
+
180
+ unique_images = np.unique(image_names.astype(str))
181
+ unique_images = unique_images[(unique_images!="nan")]
182
+ len_unique_images = len(unique_images)
183
+ print("n_runs",n_runs)
184
+
185
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
186
+ assert len(unique_images) == 851
187
+
188
+ print(image_names[:4])
189
+ print(starts[:4])
190
+ print(is_new_run[:4])
191
+
192
+ if remove_random_n:
193
+ # want to remove 150 imgs
194
+ # 100 special515 imgs are repeated 3x (300 total)
195
+ # all other train imgs are only shown once (558 total)
196
+ # of the 150, want to sample proportionally since we're cutting all repeats for special515
197
+ # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total
198
+ np.random.seed(seed)
199
+ options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)
200
+ options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)
201
+ imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False)
202
+ imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))
203
+
204
+ image_idx = np.array([]) # contains the unique index of each presented image
205
+ vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx
206
+ all_MST_images = dict()
207
+ for i, im in enumerate(image_names):
208
+ # skip if blank, nan
209
+ if im == "blank.jpg":
210
+ i+=1
211
+ continue
212
+ if str(im) == "nan":
213
+ i+=1
214
+ continue
215
+ vox_image_names = np.append(vox_image_names, im)
216
+ if remove_close_to_MST: # optionally skip close_to_MST images
217
+ if "closest_pairs" in im:
218
+ i+=1
219
+ continue
220
+ elif remove_random_n:
221
+ if im in imgs_to_remove:
222
+ i+=1
223
+ continue
224
+
225
+ image_idx_ = np.where(im==unique_images)[0].item()
226
+ image_idx = np.append(image_idx, image_idx_)
227
+
228
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles
229
+ import re
230
+ if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im) or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)):
231
+ # the regexp here looks for **_***.png, allowing 1-2 digits before the underscore and 1-3 digits after it
232
+ # print(im)
233
+ all_MST_images[i] = im
234
+ i+=1
235
+ elif 'MST' in im:
236
+ all_MST_images[i] = im
237
+ i+=1
238
+
239
+ image_idx = torch.Tensor(image_idx).long()
240
+ # for im in new_image_names[MST_images]:
241
+ # assert 'MST_pairs' in im
242
+ # assert len(all_MST_images) == 300
243
+
244
+ unique_MST_images = np.unique(list(all_MST_images.values()))
245
+
246
+ MST_ID = np.array([], dtype=int)
247
+ if remove_close_to_MST:
248
+ close_to_MST_idx = np.array([], dtype=int)
249
+ if remove_random_n:
250
+ random_n_idx = np.array([], dtype=int)
251
+
252
+ vox_idx = np.array([], dtype=int)
253
+ j=0 # counter over non-blank trials (rows of vox); when remove_random_n is True, vox_idx collects the rows to drop, unused otherwise
254
+ for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure
255
+ # skip if blank, nan
256
+ if im == "blank.jpg":
257
+ i+=1
258
+ continue
259
+ if str(im) == "nan":
260
+ i+=1
261
+ continue
262
+ if remove_close_to_MST: # optionally skip close_to_MST images
263
+ if "closest_pairs" in im:
264
+ close_to_MST_idx = np.append(close_to_MST_idx, i)
265
+ i+=1
266
+ continue
267
+ if remove_random_n:
268
+ if im in imgs_to_remove:
269
+ vox_idx = np.append(vox_idx, j)
270
+ i+=1
271
+ j+=1
272
+ continue
273
+ j+=1
274
+ curr = np.where(im == unique_MST_images)
275
+ # print(curr)
276
+ if curr[0].size == 0:
277
+ MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later
278
+ else:
279
+ MST_ID = np.append(MST_ID, curr)
280
+
281
+ assert len(MST_ID) == len(image_idx)
282
+ # assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)
283
+ # MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor
284
+ print(MST_ID.shape)
285
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
286
+ assert len(all_MST_images) == 100
287
+
288
+
289
+ # ## Load images
290
+
291
+ # In[6]:
292
+
293
+
294
+ import imageio.v2 as imageio
295
+ resize_transform = transforms.Resize((224, 224))
296
+ MST_images = []
297
+ images = None
298
+ for im_name in tqdm(image_idx):
299
+ if sub == 'sub-001' and session == 'ses-01':
300
+ image_file = f"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}"
301
+ else:
302
+ image_file = f"{unique_images[im_name]}"
303
+ im = imageio.imread(image_file)
304
+ im = torch.Tensor(im / 255).permute(2,0,1)
305
+ im = resize_transform(im.unsqueeze(0))
306
+ if images is None:
307
+ images = im
308
+ else:
309
+ images = torch.vstack((images, im))
310
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
311
+ if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\d{1,2}_\d{1,3}\.png$', image_file)):
312
+ MST_images.append(True)
313
+ else:
314
+ MST_images.append(False)
315
+ else:
316
+ if ("MST_pairs" in image_file): # ("_seed_" not in unique_images[im_name]) and (unique_images[im_name] != "blank.jpg")
317
+ MST_images.append(True)
318
+ else:
319
+ MST_images.append(False)
320
+
321
+ print("images", images.shape)
322
+ MST_images = np.array(MST_images)
323
+ print("MST_images", len(MST_images))
324
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
325
+ assert len(MST_images[MST_images==True]) == 100
326
+ print("MST_images==True", len(MST_images[MST_images==True]))
327
+
328
+
329
+ # In[7]:
330
+
331
+
332
+ # want IDs of pairmates based on MST_images
333
+ # create "MST_pairmates" which is a 25x2 array with indices of the 25 pairs based on MST_images == True
334
+
335
+ assert unique_MST_images.shape[0] % 2 == 0 # make sure it's divisible by 2
336
+ MST_pairmate_names = unique_MST_images.reshape(int(unique_MST_images.shape[0]/2),2)
337
+ # print(MST_pairmate_names)
338
+
339
+ MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int)
340
+ for p, pair in enumerate(MST_pairmate_names):
341
+ for i, im in enumerate(pair):
342
+ MST_pairmate_indices[p][i] = np.where(np.isin(list(all_MST_images.values()), im))[0][0] # just take the first repeated instance of an image
343
+
344
+ print(MST_pairmate_indices.shape, MST_pairmate_indices)
345
+
346
+
347
+ # In[8]:
348
+
349
+
350
+ if (sub == 'sub-001' and session in ('ses-02', 'ses-03', 'all')):
351
+ # MST_pairs contains the indices of repeats based on all_MST_images
352
+ # all_MST_images contains the indices of images from image_names
353
+ MST_pairs = utils.find_paired_indices(torch.tensor(MST_ID))
354
+ MST_pairs = np.array(sorted(MST_pairs[:-1], key=lambda x: x[0])) # we added a fake value as a placeholder so index out the last group of pairs
355
+
356
+ # assert images[MST_pairs]
357
+
358
+ fig, ax = plt.subplots(1, 3, figsize=(10,4))
359
+ fig.suptitle('Sample MST pairs')
360
+
361
+ ax[0].imshow(images[MST_pairs[-1][0]].permute(1,2,0).numpy())
362
+ ax[0].set_title(f"Trial 0")
363
+
364
+ ax[1].imshow(images[MST_pairs[-1][1]].permute(1,2,0).numpy())
365
+ ax[1].set_title(f"Trial 1")
366
+
367
+ ax[2].imshow(images[MST_pairs[-1][2]].permute(1,2,0).numpy())
368
+ ax[2].set_title(f"Trial 2")
369
+
370
+ plt.setp(ax, xticks=[], yticks=[])
371
+ plt.tight_layout()
372
+ plt.show()
373
+
374
+
375
+ # In[9]:
376
+
377
+
378
+ # pairs has the indices of all repeated images
379
+ pairs = utils.find_paired_indices(image_idx)
380
+ pairs = sorted(pairs, key=lambda x: x[0])
381
+
382
+ fig, axes = plt.subplots(1, 3, figsize=(6, 2)) # 1 row, 3 columns
383
+ for i, ax in enumerate(axes):
384
+ ax.imshow(images[i].permute(1, 2, 0).numpy())
385
+ ax.set_title(f"Trial {i}")
386
+ ax.axis("off") # Hide axes for better visualization
387
+
388
+ plt.tight_layout()
389
+ # output_path = os.path.join(output_dir, "trials_plot.png")
390
+ # plt.savefig(output_path, dpi=300) # Save figure
391
+ plt.show()
392
+
393
+
394
+ # In[10]:
395
+
396
+
397
+ p=0
398
+
399
+ # plot 2 repeats (anything in pairs should have 2 repeats, even if there's more)
400
+ fig, ax = plt.subplots(1, 2, figsize=(10,8))
401
+
402
+ ax[0].imshow(images[pairs[p][0]].permute(1,2,0).numpy())
403
+ ax[0].set_title(f"Repeat 1")
404
+
405
+ ax[1].imshow(images[pairs[p][1]].permute(1,2,0).numpy())
406
+ ax[1].set_title(f"Repeat 2")
407
+
408
+ plt.setp(ax, xticks=[], yticks=[])
409
+ plt.tight_layout()
410
+ plt.show()
411
+
412
+
413
+ # In[11]:
414
+
415
+
416
+ def get_image_pairs(sub, session, func_task_name, designdir):
417
+ """Loads design files and processes image pairs for a given session."""
418
+ _, _, _, _, image_names, unique_images, _ = preproc.load_design_files(
419
+ sub=sub,
420
+ session=session,
421
+ func_task_name=func_task_name,
422
+ designdir=designdir,
423
+ design_ses_list=[session] # Ensure it's a list
424
+ )
425
+ return utils.process_images(image_names, unique_images)
426
+
427
+
428
+ # In[12]:
429
+
430
+
431
+ from collections import defaultdict
432
+
433
+ all_dicts = []
434
+ for s_idx, s in enumerate(ses_list):
435
+ im, vo, _ = get_image_pairs(sub, s, func_task_name, designdir)
436
+ assert len(im) == len(vo)
437
+ all_dicts.append({k:v for k,v in enumerate(vo)})
438
+
439
+ # for the train set (ses-01-02 non-MST)
440
+ image_to_indices = defaultdict(lambda: [[] for _ in range(len(ses_list))])
441
+ for ses_idx, idx_to_name in enumerate(all_dicts):
442
+ for idx, name in idx_to_name.items():
443
+ image_to_indices[name][ses_idx].append(idx)
444
+
445
+ image_to_indices = dict(image_to_indices)
446
+
447
+ # for the test set (ses-03)
448
+ # test_image_to_indices = defaultdict(lambda: [[] for _ in range(len([ses_list[-1]]))])
449
+ # for ses_idx, idx_to_name in enumerate([all_dicts[-1]]):
450
+ # for idx, name in idx_to_name.items():
451
+ # test_image_to_indices[name][ses_idx].append(idx)
452
+
453
+ # test_image_to_indices = dict(test_image_to_indices)
454
+
455
+ if sub == 'sub-005' and len(ses_list) > 1:
456
+ session_length = 693
457
+ for image, session_indices_list in image_to_indices.items():
458
+ new_indices_list = []
459
+ for idx, indices in enumerate(session_indices_list):
460
+ offset = idx * session_length
461
+ new_indices = [i + offset for i in indices]
462
+ new_indices_list.append(new_indices)
463
+ image_to_indices[image] = new_indices_list
464
+
465
+ import itertools
466
+ assert max(itertools.chain.from_iterable(list(image_to_indices.values())))[0] == (len(ses_list)*session_length) - 1
467
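+ # Worked example of the offset above: with session_length = 693, a trial at
+ # within-session index 10 in ses-02 (the second session, offset 1*693) maps to
+ # global index 703 in the concatenated data.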
+
468
+
469
+ # In[13]:
470
+
471
+
472
+ if resample_voxel_size:
473
+ from nilearn.masking import apply_mask, unmask
474
+ ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz'
475
+ omat_name = f'{glmsingle_path}/boldref_omat'
476
+
477
+
478
+ # In[14]:
479
+
480
+
481
+ from nilearn.plotting import plot_roi
482
+
483
+ print('loading brain mask')
484
+ avg_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_brain.nii.gz')
485
+ final_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_mask.nii.gz')
486
+
487
+ # mask info
488
+ dimsize=avg_mask.header.get_zooms()
489
+ affine_mat = avg_mask.affine
490
+ brain=avg_mask.get_fdata()
491
+ xyz=brain.shape #xyz dimensionality of brain mask and epi data
492
+
493
+ print('Mask dimensions:', dimsize)
494
+ print('')
495
+ print('Affine:')
496
+ print(affine_mat)
497
+ print('')
498
+ print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n')
499
+
500
+ plot_roi(final_mask, bg_img=avg_mask)
501
+ plt.show()
502
+
503
+
504
+ # In[15]:
505
+
506
+
507
+ # # create union of ses-01 and ses-02 reliability masks and plot against avg_mask
508
+ # rel_masks = []
509
+ # rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-01_to_ses-03.npy'))
510
+ # rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-02_to_ses-03.npy'))
511
+ # rel_masks = np.array(rel_masks)
512
+ # for r in rel_masks:
513
+ # assert r.shape[0] == int(final_mask.get_fdata().sum())
514
+ # assert r.dtype == bool
515
+
516
+ # assert len(rel_masks) == 2 # should be the case if there's 2 training sessions
517
+ # union_mask = np.logical_or(rel_masks[0], rel_masks[1])
518
+ # assert union_mask.sum() > rel_masks[0].sum()
519
+ # assert union_mask.sum() > rel_masks[1].sum()
520
+ # print(f'there are {union_mask.sum()} reliable voxels based on the union mask out of {int(final_mask.get_fdata().sum())} voxels in the nsdgeneral roi')
521
+ # print(f'{(union_mask.sum() / int(final_mask.get_fdata().sum())):.2%} of the voxels in the roi were selected')
522
+ # path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy'
523
+ path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_ses-01-02.npy'
524
+ # np.save(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy', union_mask)
525
+ # print(f'saved union mask to {path}!')
526
+ union_mask = np.load(path)
527
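+ # Optional sanity check (sketch, mirroring the commented-out code above): the
+ # loaded union mask should be a boolean vector with one entry per voxel in the
+ # nsdgeneral ROI defined by final_mask.
+ assert union_mask.dtype == bool
+ assert union_mask.shape[0] == int(final_mask.get_fdata().sum())
+ print(f'{union_mask.sum()} reliable voxels out of {union_mask.shape[0]} in the ROI')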
+
528
+
529
+ # ## Proceed to fine-tuning using real-time compatible betas
530
+
531
+ # In[16]:
532
+
533
+
534
+ ##
535
+
536
+
537
+ # ## load betas from ses-01 and 02 that were computed based on real-time preprocessing that is identical to that used in the real-time session
538
+
539
+ # In[17]:
540
+
541
+
542
+ ses_vox = []
543
+ runs_per_session = int(n_runs/len(ses_list))
544
+ for s in ses_list:
545
+ ses_vox_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/3t/derivatives/{sub}_{s}_task-{func_task_name}_run-{runs_per_session}_recons/betas_run-{runs_per_session}.npy" # assumes each session list has the same number of runs
546
+ ses_vox.append(np.load(ses_vox_path))
547
+
548
+ vox = np.concatenate(ses_vox).squeeze(1)
549
+ print(vox.shape)
550
+ assert len(vox) == len(image_idx)
551
+
552
+
553
+ # In[18]:
554
+
555
+
556
+ ses_mask = []
557
+
558
+ for s in ses_list:
559
+ ses_mask_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C/sub-005_{s}_task-C_brain.nii.gz'
560
+ ses_mask.append(nib.load(ses_mask_path))
561
+
562
+ assert np.all(ses_mask[-1].affine == final_mask.affine)
563
+ assert np.all(ses_mask[-1].shape == final_mask.shape)
564
+
565
+
566
+ # In[19]:
567
+
568
+
569
+ # # get vox into the same shape as the union mask
570
+ # v = nilearn.masking.unmask(vox, ses_mask) # move back to 3D based on own session mask
571
+ # final_mask = nilearn.masking.intersect_masks([avg_mask, roi])
572
+ # vox = nilearn.masking.apply_mask(vox, final_mask) # re-flatten based on final mask so everything is in the same shape now
573
+ # print(vox.shape)
574
+
575
+
576
+ # In[20]:
577
+
578
+
579
+ pairs_homog = np.array([[p[0], p[1]] for p in pairs])
580
+
581
+
582
+ # In[21]:
583
+
584
+
585
+ same_corrs = []
586
+ diff_corrs = []
587
+ for isamp, samp in enumerate(vox[pairs_homog]):
588
+ avg_same_img = []
589
+ for i in range(samp.shape[0]):
590
+ for j in range(i, samp.shape[0]):
591
+ if i != j:
592
+ avg_same_img.append(np.array([np.corrcoef(samp[i, :], samp[j, :])[0,1]]))
593
+
594
+ same_corrs.append(np.mean(avg_same_img))
595
+
596
+ avg_diff_img = []
597
+ for isamp_j, samp_j in enumerate(vox[pairs_homog]):
598
+ if isamp_j != isamp:
599
+ for i in range(samp_j.shape[0]):
600
+ for j in range(i, samp_j.shape[0]):
601
+ if i != j:
602
+ avg_diff_img.append(np.array([np.corrcoef(samp[i, :], samp_j[j, :])[0,1]]))
603
+
604
+ # print(len(avg_diff_img))
605
+ diff_corrs.append(np.mean(avg_diff_img))
606
+
607
+
608
+ print(len(same_corrs), len(diff_corrs))
609
+ same_corrs = np.array(same_corrs)
610
+ diff_corrs = np.array(diff_corrs)
611
+
612
+
613
+ plt.figure(figsize=(5,4))
614
+ plt.title(f"{sub}_{session} same/diff Pearson corr.")
615
+ plt.plot(np.sort(same_corrs),c='blue',label='same')
616
+ plt.plot(np.sort(diff_corrs),c='cyan',label='diff')
617
+ plt.axhline(0,c='k',ls='--')
618
+ plt.legend()
619
+ plt.xlabel("sample")
620
+ plt.ylabel("Pearson R")
621
+ plt.show()
622
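+ # Optional summary (sketch): report the mean same-image vs. different-image
+ # correlations computed above as a quick single-number reliability check.
+ print(f"mean same-image r = {same_corrs.mean():.3f}, "
+       f"mean diff-image r = {diff_corrs.mean():.3f}, "
+       f"mean difference = {(same_corrs - diff_corrs).mean():.3f}")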
+
623
+
624
+ # In[22]:
625
+
626
+
627
+ vox_pairs = utils.zscore(vox[pairs_homog])
628
+ plt.figure(figsize=(5,4))
629
+ plt.title(f"{sub}_{session} same minus diff difference Pearson corr.")
630
+ plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference')
631
+ plt.axhline(0,c='k',ls='--')
632
+ plt.legend()
633
+ plt.xlabel("sample")
634
+ plt.ylabel("Pearson R")
635
+ plt.show()
636
+
637
+
638
+ # # Training MindEye
639
+
640
+ # In[23]:
641
+
642
+
643
+ utils.seed_everything(seed)
644
+
645
+ if train_test_split == 'orig':
646
+ # train = all images except images that were repeated
647
+ # test = average of the same-image presentations
648
+ imageTrain = np.arange(len(images))
649
+ train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])
650
+ test_image_indices = pairs
651
+ print(len(train_image_indices), len(test_image_indices))
652
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
653
+ elif train_test_split == 'MST':
654
+ # non-MST images are the train split
655
+ # MST images are the test split
656
+ MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k])
657
+ non_MST_idx = [v for k,v in image_to_indices.items() if 'MST_pairs' not in k]
658
+ non_MST_idx = np.array([z for y in non_MST_idx for x in y for z in x]) # flatten the indices
659
+ train_image_indices = non_MST_idx
660
+ test_image_indices = MST_idx.flatten() # MST_idx contains the mapping for the different test sets; test_image_indices has all MST indices combined
661
+ print(len(train_image_indices), len(test_image_indices))
662
+ assert len(train_image_indices) + len(test_image_indices) == len(vox)
663
+ elif train_test_split == 'unique':
664
+ imageTest = np.arange(len(images))
665
+ train_image_indices = pairs.flatten()
666
+ test_image_indices = np.array([item for item in imageTest if item not in pairs.flatten()])
667
+ print(len(train_image_indices), len(test_image_indices))
668
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
669
+ else:
670
+ raise Exception("invalid train_test_split")
671
+
672
+ # TODO add assertion that verifies file names in train and test don't overlap, guards against repeats
673
+
674
+ for i in train_image_indices:
675
+ assert i not in test_image_indices
676
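+ # Sketch of the TODO above (assumes the MST split used here and that
+ # remove_close_to_MST and remove_random_n are False, so vox_image_names stays
+ # aligned with vox and test_image_indices is a flat integer array): verify that
+ # no image file name appears in both the train and test splits.
+ train_names = set(np.asarray(vox_image_names)[train_image_indices])
+ test_names = set(np.asarray(vox_image_names)[test_image_indices])
+ assert train_names.isdisjoint(test_names), "train/test image names overlap"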
+
677
+
678
+ # In[24]:
679
+
680
+
681
+ ses_split = vox[train_image_indices].shape[0] // 2
682
+
683
+ train_mean_s1 = np.mean(vox[train_image_indices][:ses_split], axis=0)
684
+ train_std_s1 = np.std(vox[train_image_indices][:ses_split], axis=0)
685
+ train_mean_s2 = np.mean(vox[train_image_indices][ses_split:], axis=0)
686
+ train_std_s2 = np.std(vox[train_image_indices][ses_split:], axis=0)
687
+
688
+
689
+ vox[:ses_split] = utils.zscore(vox[:ses_split],train_mean=train_mean_s1,train_std=train_std_s1)
690
+ vox[ses_split:] = utils.zscore(vox[ses_split:],train_mean=train_mean_s2,train_std=train_std_s2)
691
+
692
+ print("voxels have been zscored")
693
+ print("ses-01:", vox[:ses_split,0].mean(), vox[:ses_split,0].std())
694
+ print("ses-02:", vox[ses_split:,0].mean(), vox[ses_split:,0].std())
695
+ print("vox", vox.shape)
696
+
697
+
698
+ # In[25]:
699
+
700
+
701
+ # save the mean and std from ses-01 and 02
702
+ train_test_mean_s1 = np.mean(vox[:ses_split], axis=0)
703
+ train_test_std_s1 = np.std(vox[:ses_split], axis=0)
704
+ train_test_mean_s2 = np.mean(vox[ses_split:], axis=0)
705
+ train_test_std_s2 = np.std(vox[ses_split:], axis=0)
706
+ print(train_test_mean_s1.shape)
707
+ assert np.all(train_test_mean_s1.shape == train_test_std_s1.shape)
708
+ assert np.all(train_test_mean_s1.shape == train_test_mean_s2.shape)
709
+ assert np.all(train_test_mean_s1.shape == train_test_std_s2.shape)
710
+
711
+
712
+ # In[26]:
713
+
714
+
715
+ # for idx in deleted_indices:
716
+ # # check image names to be deleted match
717
+ # original_name = vox_image_dict[idx]
718
+ # matching_indices = [i for i in deleted_indices if vox_image_dict[i] == original_name]
719
+ # assert all(vox_image_dict[i] == original_name for i in matching_indices), \
720
+ # f"Mismatch in image names for deleted indices {matching_indices}"
721
+
722
+ # # check image data to be deleted match
723
+ # base_image = images[matching_indices[0]] # Reference image
724
+ # for i in matching_indices[1:]:
725
+ # assert np.array_equal(base_image, images[i]), \
726
+ # f"Mismatch in image data for {vox_image_dict[i]} at index {i}"
727
+
728
+ # images = images[kept_indices]
729
+
730
+
731
+ # In[27]:
732
+
733
+
734
+ images = torch.Tensor(images)
735
+ vox = torch.Tensor(vox)
736
+ assert len(images) == len(vox)
737
+
738
+
739
+ # In[28]:
740
+
741
+
742
+ ### Multi-GPU config ###
743
+ from accelerate import Accelerator, DeepSpeedPlugin
744
+
745
+ local_rank = os.getenv('RANK')
746
+ if local_rank is None:
747
+ local_rank = 0
748
+ else:
749
+ local_rank = int(local_rank)
750
+ print("LOCAL RANK ", local_rank)
751
+
752
+ data_type = torch.float32 # change depending on your mixed_precision
753
+
754
+ accelerator = Accelerator(split_batches=False)
755
+ batch_size = 8
756
+
757
+
758
+ # In[29]:
759
+
760
+
761
+ print("PID of this process =",os.getpid())
762
+ device = accelerator.device
763
+ print("device:",device)
764
+ world_size = accelerator.state.num_processes
765
+ distributed = accelerator.state.distributed_type != 'NO'
766
+ num_devices = torch.cuda.device_count()
767
+ global_batch_size = batch_size * num_devices
768
+ print("global_batch_size", global_batch_size)
769
+ if num_devices==0 or not distributed: num_devices = 1
770
+ num_workers = num_devices
771
+ print(accelerator.state)
772
+
773
+ # set data_type to match your mixed precision (automatically set based on deepspeed config)
774
+ if accelerator.mixed_precision == "bf16":
775
+ data_type = torch.bfloat16
776
+ elif accelerator.mixed_precision == "fp16":
777
+ data_type = torch.float16
778
+ else:
779
+ data_type = torch.float32
780
+
781
+ print("distributed =",distributed, "num_devices =", num_devices, "local rank =", local_rank, "world size =", world_size, "data_type =", data_type)
782
+ print = accelerator.print # only print if local_rank=0
783
+
784
+
785
+ # ## Configurations
786
+
787
+ # In[30]:
788
+
789
+
790
+ # if running this interactively, can specify jupyter_args here for argparser to use
791
+ if utils.is_interactive():
792
+ model_name = 'testing_MST' # 'sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0'
793
+ print("model_name:", model_name)
794
+
795
+ # global_batch_size and batch_size should already be defined in the above cells
796
+ # other variables can be specified in the following string:
797
+ # jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name={model_name}"
798
+
799
+ jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \
800
+ --model_name={model_name} \
801
+ --no-multi_subject --subj=1 --batch_size={batch_size} \
802
+ --hidden_dim=1024 --clip_scale=1. \
803
+ --no-blurry_recon --blur_scale=.5 \
804
+ --no-use_prior --prior_scale=30 \
805
+ --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \
806
+ --ckpt_interval=999 --no-ckpt_saving --new_test \
807
+ --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep"
808
+ print(jupyter_args)
809
+ jupyter_args = jupyter_args.split()
810
+
811
+
812
+ # In[31]:
813
+
814
+
815
+ parser = argparse.ArgumentParser(description="Model Training Configuration")
816
+ parser.add_argument(
817
+ "--model_name", type=str, default="testing",
818
+ help="name of model, used for ckpt saving and wandb logging (if enabled)",
819
+ )
820
+ parser.add_argument(
821
+ "--data_path", type=str, default="/weka/proj-fmri/shared/natural-scenes-dataset",
822
+ help="Path to where NSD data is stored / where to download it to",
823
+ )
824
+ parser.add_argument(
825
+ "--subj",type=int, default=1, choices=[1,2,3,4,5,6,7,8],
826
+ help="Validate on which subject?",
827
+ )
828
+ parser.add_argument(
829
+ "--multisubject_ckpt", type=str, default=None,
830
+ help="Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.",
831
+ )
832
+ parser.add_argument(
833
+ "--num_sessions", type=int, default=0,
834
+ help="Number of training sessions to include (if multi_subject, this variable doesnt matter)",
835
+ )
836
+ parser.add_argument(
837
+ "--use_prior",action=argparse.BooleanOptionalAction,default=False,
838
+ help="whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)",
839
+ )
840
+ parser.add_argument(
841
+ "--batch_size", type=int, default=32,
842
+ help="Batch size can be increased by 10x if only training v2c and not diffusion diffuser",
843
+ )
844
+ parser.add_argument(
845
+ "--wandb_log",action=argparse.BooleanOptionalAction,default=False,
846
+ help="whether to log to wandb",
847
+ )
848
+ parser.add_argument(
849
+ "--resume_from_ckpt",action=argparse.BooleanOptionalAction,default=False,
850
+ help="if not using wandb and want to resume from a ckpt",
851
+ )
852
+ parser.add_argument(
853
+ "--wandb_project",type=str,default="stability",
854
+ help="wandb project name",
855
+ )
856
+ parser.add_argument(
857
+ "--mixup_pct",type=float,default=.33,
858
+ help="proportion of way through training when to switch from BiMixCo to SoftCLIP",
859
+ )
860
+ parser.add_argument(
861
+ "--low_mem",action=argparse.BooleanOptionalAction,default=False,
862
+ help="whether to preload images to cpu to speed things up but consume more memory",
863
+ )
864
+ parser.add_argument(
865
+ "--blurry_recon",action=argparse.BooleanOptionalAction,default=True,
866
+ help="whether to output blurry reconstructions",
867
+ )
868
+ parser.add_argument(
869
+ "--blur_scale",type=float,default=.5,
870
+ help="multiply loss from blurry recons by this number",
871
+ )
872
+ parser.add_argument(
873
+ "--clip_scale",type=float,default=1.,
874
+ help="multiply contrastive loss by this number",
875
+ )
876
+ parser.add_argument(
877
+ "--prior_scale",type=float,default=30,
878
+ help="multiply diffusion prior loss by this",
879
+ )
880
+ parser.add_argument(
881
+ "--use_image_aug",action=argparse.BooleanOptionalAction,default=True,
882
+ help="whether to use image augmentation",
883
+ )
884
+ parser.add_argument(
885
+ "--num_epochs",type=int,default=120,
886
+ help="number of epochs of training",
887
+ )
888
+ parser.add_argument(
889
+ "--multi_subject",action=argparse.BooleanOptionalAction,default=False,
890
+ )
891
+ parser.add_argument(
892
+ "--new_test",action=argparse.BooleanOptionalAction,default=True,
893
+ )
894
+ parser.add_argument(
895
+ "--n_blocks",type=int,default=2,
896
+ )
897
+ parser.add_argument(
898
+ "--hidden_dim",type=int,default=1024,
899
+ )
900
+ parser.add_argument(
901
+ "--seq_past",type=int,default=0,
902
+ )
903
+ parser.add_argument(
904
+ "--seq_future",type=int,default=0,
905
+ )
906
+ parser.add_argument(
907
+ "--lr_scheduler_type",type=str,default='cycle',choices=['cycle','linear'],
908
+ )
909
+ parser.add_argument(
910
+ "--ckpt_saving",action=argparse.BooleanOptionalAction,default=True,
911
+ )
912
+ parser.add_argument(
913
+ "--ckpt_interval",type=int,default=5,
914
+ help="save backup ckpt and reconstruct every x epochs",
915
+ )
916
+ parser.add_argument(
917
+ "--seed",type=int,default=42,
918
+ )
919
+ parser.add_argument(
920
+ "--max_lr",type=float,default=3e-4,
921
+ )
922
+
923
+ if utils.is_interactive():
924
+ args = parser.parse_args(jupyter_args)
925
+ else:
926
+ args = parser.parse_args()
927
+
928
+ # create global variables without the args prefix
929
+ for attribute_name in vars(args).keys():
930
+ globals()[attribute_name] = getattr(args, attribute_name)
931
+
932
+ outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')
933
+ if not os.path.exists(outdir) and ckpt_saving:
934
+ os.makedirs(outdir,exist_ok=True)
935
+
936
+ if use_image_aug or blurry_recon:
937
+ import kornia
938
+ import kornia.augmentation as K
939
+ from kornia.augmentation.container import AugmentationSequential
940
+ if use_image_aug:
941
+ img_augment = AugmentationSequential(
942
+ kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),
943
+ same_on_batch=False,
944
+ data_keys=["input"],
945
+ )
946
+ # Define the blurring augmentations
947
+ blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)
948
+
949
+ if multi_subject:
950
+ subj_list = np.arange(1,9)
951
+ subj_list = subj_list[subj_list != subj]
952
+ else:
953
+ subj_list = [subj]
954
+
955
+ print("subj_list", subj_list, "num_sessions", num_sessions)
956
+
957
+
958
+ # ## Prep data, models, and dataloaders
959
+
960
+ # In[32]:
961
+
962
+
963
+ if ckpt_saving:
964
+ # save MST_ID for 2-alternative forced-choice retrieval evaluation
965
+ if 'MST' in model_name:
966
+ eval_dir = os.environ["eval_dir"]
967
+ print('saving MST info in', eval_dir)
968
+ # Saving ##
969
+ if not os.path.exists(eval_dir):
970
+ os.mkdir(eval_dir)
971
+
972
+ np.save(f"{eval_dir}/MST_ID.npy", MST_ID)
973
+ np.save(f"{eval_dir}/MST_pairmate_indices.npy", MST_pairmate_indices)
974
+
975
+ if remove_random_n:
976
+ np.save(f"{eval_dir}/imgs_to_remove.npy", imgs_to_remove)
977
+
978
+ np.save(f"{eval_dir}/train_image_indices.npy", train_image_indices)
979
+ np.save(f"{eval_dir}/test_image_indices.npy", test_image_indices)
980
+ np.save(f"{eval_dir}/images.npy", images)
981
+ np.save(f"{eval_dir}/vox.npy", vox)
982
+
983
+ np.save(f'{eval_dir}/train_test_mean_s1.npy', train_test_mean_s1)
984
+ np.save(f'{eval_dir}/train_test_std_s1.npy', train_test_std_s1)
985
+ np.save(f'{eval_dir}/train_test_mean_s2.npy', train_test_mean_s2)
986
+ np.save(f'{eval_dir}/train_test_std_s2.npy', train_test_std_s2)
987
+
988
+
989
+ # ### Creating wds dataloader, preload betas and all 73k possible images
990
+
991
+ # In[33]:
992
+
993
+
994
+ def my_split_by_node(urls): return urls
995
+ num_voxels_list = []
996
+
997
+ if multi_subject:
998
+ nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])
999
+ num_samples_per_epoch = (750*40) // num_devices
1000
+ else:
1001
+ # num_samples_per_epoch = (750*num_sessions) // num_devices
1002
+ num_samples_per_epoch = len(train_image_indices)
1003
+
1004
+ print("dividing batch size by subj_list, which will then be concatenated across subj during training...")
1005
+ batch_size = batch_size // len(subj_list)
1006
+
1007
+ num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))
1008
+
1009
+ print("batch_size =", batch_size, "num_iterations_per_epoch =",num_iterations_per_epoch, "num_samples_per_epoch =",num_samples_per_epoch)
1010
+
1011
+
1012
+ # In[34]:
1013
+
1014
+
1015
+ train_data = {}
1016
+ train_dl = {}
1017
+
1018
+ train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))
1019
+ test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))
1020
+
1021
+
1022
+ # In[35]:
1023
+
1024
+
1025
+ num_voxels = {}
1026
+ voxels = {}
1027
+ for s in subj_list:
1028
+ print(f"Training with {num_sessions} sessions")
1029
+ train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)
1030
+
1031
+ num_voxels_list.append(vox[0].shape[-1])
1032
+ num_voxels[f'subj0{s}'] = vox[0].shape[-1]
1033
+ voxels[f'subj0{s}'] = vox
1034
+ print(f"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}")
1035
+
1036
+ print("Loaded all subj train dls and vox!\n")
1037
+
1038
+ # Validate only on one subject
1039
+ if multi_subject:
1040
+ subj = subj_list[0] # can't validate on the actual held-out subject, so pick the first in subj_list
1041
+ test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True)
1042
+
1043
+ print(f"Loaded test dl for subj{subj}!\n")
1044
+
1045
+
1046
+ # ## Load models
1047
+
1048
+ # ### CLIP image embeddings model
1049
+
1050
+ # In[36]:
1051
+
1052
+
1053
+ ## USING OpenCLIP ViT-bigG ###
1054
+ sys.path.append('generative_models/')
1055
+ import sgm
1056
+ from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder
1057
+ # from generative_models.sgm.models.diffusion import DiffusionEngine
1058
+ # from omegaconf import OmegaConf
1059
+
1060
+ try:
1061
+ print(clip_img_embedder)
1062
+ except:
1063
+ clip_img_embedder = FrozenOpenCLIPImageEmbedder(
1064
+ arch="ViT-bigG-14",
1065
+ version="laion2b_s39b_b160k",
1066
+ output_tokens=True,
1067
+ only_tokens=True,
1068
+ )
1069
+ clip_img_embedder.to(device)
1070
+ clip_seq_dim = 256
1071
+ clip_emb_dim = 1664
1072
+
1073
+ # ## USING OPEN AI CLIP ViT-L ###
1074
+ # import clip
1075
+ # try:
1076
+ # print(clip_model)
1077
+ # except:
1078
+ # clip_model, preprocess = clip.load("ViT-L/14", device=device)
1079
+ # preprocess = transforms.Compose([
1080
+ # transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),
1081
+ # transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
1082
+ # std=[0.26862954, 0.26130258, 0.27577711]),
1083
+ # ])
1084
+ # def clip_img_embedder(image):
1085
+ # preproc_img = preprocess(image)
1086
+ # return clip_model.encode_image(preproc_img)
1087
+ # clip_seq_dim = 1
1088
+ # clip_emb_dim = 768
1089
+
1090
+
1091
+ # ### MindEye modules
1092
+
1093
+ # In[37]:
1094
+
1095
+
1096
+ model = utils.prepare_model_and_training(
1097
+ num_voxels_list=num_voxels_list,
1098
+ n_blocks=n_blocks,
1099
+ hidden_dim=hidden_dim,
1100
+ clip_emb_dim=clip_emb_dim,
1101
+ clip_seq_dim=clip_seq_dim,
1102
+ use_prior=use_prior,
1103
+ clip_scale=clip_scale
1104
+ )
1105
+
1106
+
1107
+ # In[38]:
1108
+
1109
+
1110
+ # test on subject 1 with fake data
1111
+ b = torch.randn((2,1,num_voxels_list[0]))
1112
+ print(b.shape, model.ridge(b,0).shape)
1113
+
1114
+
1115
+ # In[39]:
1116
+
1117
+
1118
+ # test that the model works on some fake data
1119
+ b = torch.randn((2,1,hidden_dim))
1120
+ print("b.shape",b.shape)
1121
+
1122
+ backbone_, clip_, blur_ = model.backbone(b)
1123
+ print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)
1124
+
1125
+
1126
+ # ### Adding diffusion prior + unCLIP if use_prior=True
1127
+
1128
+ # In[40]:
1129
+
1130
+
1131
+ if use_prior:
1132
+ from models import *
1133
+
1134
+ # setup diffusion prior network
1135
+ out_dim = clip_emb_dim
1136
+ depth = 6
1137
+ dim_head = 52
1138
+ heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim
1139
+ timesteps = 100
1140
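+ # Worked example of the head arithmetic above: with clip_emb_dim = 1664 and
+ # dim_head = 52, heads = 1664 // 52 = 32, and 32 * 52 = 1664 as required.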
+
1141
+ prior_network = VersatileDiffusionPriorNetwork(
1142
+ dim=out_dim,
1143
+ depth=depth,
1144
+ dim_head=dim_head,
1145
+ heads=heads,
1146
+ causal=False,
1147
+ num_tokens = clip_seq_dim,
1148
+ learned_query_mode="pos_emb"
1149
+ )
1150
+
1151
+ model.diffusion_prior = BrainDiffusionPrior(
1152
+ net=prior_network,
1153
+ image_embed_dim=out_dim,
1154
+ condition_on_text_encodings=False,
1155
+ timesteps=timesteps,
1156
+ cond_drop_prob=0.2,
1157
+ image_embed_scale=None,
1158
+ )
1159
+
1160
+ utils.count_params(model.diffusion_prior)
1161
+ utils.count_params(model)
1162
+
1163
+
1164
+ # ### Setup optimizer / lr / ckpt saving
1165
+
1166
+ # In[41]:
1167
+
1168
+
1169
+ no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
1170
+
1171
+ opt_grouped_parameters = [
1172
+ {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},
1173
+ {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1174
+ {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
1175
+ ]
1176
+ # model.backbone.requires_grad_(False)
1177
+
1178
+ if use_prior:
1179
+ opt_grouped_parameters.extend([
1180
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1181
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
1182
+ ])
1183
+
1184
+ optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)
1185
+
1186
+ if lr_scheduler_type == 'linear':
1187
+ lr_scheduler = torch.optim.lr_scheduler.LinearLR(
1188
+ optimizer,
1189
+ total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),
1190
+ last_epoch=-1
1191
+ )
1192
+ elif lr_scheduler_type == 'cycle':
1193
+ if num_iterations_per_epoch==0:
1194
+ num_iterations_per_epoch=1
1195
+ total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))
1196
+ print("total_steps", total_steps)
1197
+ lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
1198
+ optimizer,
1199
+ max_lr=max_lr,
1200
+ total_steps=total_steps,
1201
+ final_div_factor=1000,
1202
+ last_epoch=-1, pct_start=2/num_epochs
1203
+ )
1204
+
1205
+ def save_ckpt(tag):
1206
+ ckpt_path = outdir+f'/{tag}.pth'
1207
+ if accelerator.is_main_process:
1208
+ unwrapped_model = accelerator.unwrap_model(model)
1209
+ torch.save({
1210
+ 'epoch': epoch,
1211
+ 'model_state_dict': unwrapped_model.state_dict(),
1212
+ 'optimizer_state_dict': optimizer.state_dict(),
1213
+ 'lr_scheduler': lr_scheduler.state_dict(),
1214
+ 'train_losses': losses,
1215
+ 'test_losses': test_losses,
1216
+ 'lrs': lrs,
1217
+ }, ckpt_path)
1218
+ print(f"\n---saved {outdir}/{tag} ckpt!---\n")
1219
+
1220
+ def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False):
1221
+ print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
1222
+ checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')
1223
+ state_dict = checkpoint['model_state_dict']
1224
+ if multisubj_loading: # remove incompatible ridge layer that will otherwise error
1225
+ state_dict.pop('ridge.linears.0.weight',None)
1226
+ model.load_state_dict(state_dict, strict=strict)
1227
+ if load_epoch:
1228
+ globals()["epoch"] = checkpoint['epoch']
1229
+ print("Epoch",epoch)
1230
+ if load_optimizer:
1231
+ optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
1232
+ if load_lr:
1233
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
1234
+ del checkpoint
1235
+
1236
+ print("\nDone with model preparations!")
1237
+ num_params = utils.count_params(model)
1238
+
1239
+
1240
+ # # Wandb
1241
+
1242
+ # In[42]:
1243
+
1244
+
1245
+ if local_rank==0 and wandb_log: # only use main process for wandb logging
1246
+ import wandb
1247
+ import time
1248
+
1249
+ wandb_project = 'rtmindeye'
1250
+ print(f"wandb {wandb_project} run {model_name}")
1251
+
1252
+ # Need to configure wandb beforehand in terminal with "wandb init"!
1253
+ wandb_config = {
1254
+ "model_name": model_name,
1255
+ "global_batch_size": global_batch_size,
1256
+ "batch_size": batch_size,
1257
+ "num_epochs": num_epochs,
1258
+ "num_sessions": num_sessions,
1259
+ "num_params": num_params,
1260
+ "clip_scale": clip_scale,
1261
+ "prior_scale": prior_scale,
1262
+ "blur_scale": blur_scale,
1263
+ "use_image_aug": use_image_aug,
1264
+ "max_lr": max_lr,
1265
+ "mixup_pct": mixup_pct,
1266
+ "num_samples_per_epoch": num_samples_per_epoch,
1267
+ "ckpt_interval": ckpt_interval,
1268
+ "ckpt_saving": ckpt_saving,
1269
+ "seed": seed, # SLURM array task ID
1270
+ "distributed": distributed,
1271
+ "num_devices": num_devices,
1272
+ "world_size": world_size,
1273
+ }
1274
+ print("wandb_config:\n", wandb_config)
1275
+ print("wandb_id:", model_name)
1276
+
1277
+ # Initialize wandb
1278
+ wandb.init(
1279
+ id=model_name,
1280
+ project=wandb_project,
1281
+ name=model_name,
1282
+ config=wandb_config,
1283
+ resume="allow",
1284
+ save_code=True,
1285
+ )
1286
+
1287
+ # Get SLURM job & array ID
1288
+ slurm_job_id = utils.get_slurm_job()
1289
+ slurm_array_id = seed # seed corresponds to SLURM_ARRAY_TASK_ID
1290
+
1291
+ # Define SLURM log paths
1292
+ log_dir = "slurms"
1293
+ log_files = [
1294
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.out",
1295
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.err",
1296
+ ]
1297
+
1298
+ # Ensure logs exist before logging them
1299
+ for log_file in log_files:
1300
+ wait_time = 0
1301
+ while not os.path.exists(log_file) and wait_time < 60: # Wait max 60s
1302
+ time.sleep(5)
1303
+ wait_time += 5
1304
+
1305
+ # Log SLURM logs as artifacts
1306
+ artifact = wandb.Artifact(f"slurm_logs_{slurm_job_id}_{slurm_array_id}", type="logs")
1307
+ for log_file in log_files:
1308
+ if os.path.exists(log_file):
1309
+ artifact.add_file(log_file)
1310
+
1311
+ wandb.log_artifact(artifact)
1312
+ else:
1313
+ wandb_log = False
1314
+
1315
+
1316
+ # # Train the model
1317
+
1318
+ # In[43]:
1319
+
1320
+
1321
+ epoch = 0
1322
+ losses, test_losses, lrs = [], [], []
1323
+ best_test_loss = 1e9
1324
+ torch.cuda.empty_cache()
1325
+
1326
+
1327
+ # In[44]:
1328
+
1329
+
1330
+ # load multisubject stage1 ckpt if set
1331
+ if multisubject_ckpt is not None and not resume_from_ckpt:
1332
+ load_ckpt("last",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)
1333
+
1334
+
1335
+ # In[45]:
1336
+
1337
+
1338
+ # checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')
1339
+ # state_dict = checkpoint['model_state_dict']
1340
+ # model.load_state_dict(state_dict, strict=False)
1341
+
1342
+
1343
+ # In[46]:
1344
+
1345
+
1346
+ # train_dls = [train_dl[f'subj0{s}'] for s in subj_list]
1347
+
1348
+ model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)
1349
+ # leaving out test_dl since we will only have local_rank 0 device do evals
1350
+
1351
+
1352
+ # In[47]:
1353
+
1354
+
1355
+ print(f"{model_name} starting with epoch {epoch} / {num_epochs}")
1356
+ progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0))
1357
+ test_image, test_voxel = None, None
1358
+ mse = nn.MSELoss()
1359
+ l1 = nn.L1Loss()
1360
+ soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs))
1361
+ skip_train = epoch >= (num_epochs-1) # skip training if you are resuming from a fully trained model
1362
+
1363
+ for epoch in progress_bar:
1364
+ model.train()
1365
+
1366
+ fwd_percent_correct = 0.
1367
+ bwd_percent_correct = 0.
1368
+ test_fwd_percent_correct = 0.
1369
+ test_bwd_percent_correct = 0.
1370
+
1371
+ recon_cossim = 0.
1372
+ test_recon_cossim = 0.
1373
+ recon_mse = 0.
1374
+ test_recon_mse = 0.
1375
+
1376
+ loss_clip_total = 0.
1377
+ loss_blurry_total = 0.
1378
+ loss_blurry_cont_total = 0.
1379
+ test_loss_clip_total = 0.
1380
+
1381
+ loss_prior_total = 0.
1382
+ test_loss_prior_total = 0.
1383
+
1384
+ blurry_pixcorr = 0.
1385
+ test_blurry_pixcorr = 0.
1386
+
1387
+ # you now have voxel_iters and image_iters with num_iterations_per_epoch batches each
1388
+ for train_i, behav in enumerate(train_dl):
1389
+ with torch.cuda.amp.autocast(dtype=data_type):
1390
+ optimizer.zero_grad()
1391
+ loss = 0.
1392
+
1393
+ behav = behav[0]
1394
+
1395
+ image = images[behav.long().cpu()].to(device)
1396
+ voxel = vox[behav.long().cpu()]
1397
+ # voxel = (voxel - train_mean) / train_std
1398
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1399
+
1400
+ if use_image_aug:
1401
+ image = img_augment(image)
1402
+
1403
+ clip_target = clip_img_embedder(image)
1404
+ assert not torch.any(torch.isnan(clip_target))
1405
+
1406
+ if epoch < int(mixup_pct * num_epochs):
1407
+ voxel, perm, betas, select = utils.mixco(voxel)
1408
+
1409
+ voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)]
1410
+ # voxel_ridge = torch.cat(voxel_ridge_list, dim=0)
1411
+
1412
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1413
+
1414
+ if clip_scale>0:
1415
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1416
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1417
+
1418
+ if use_prior:
1419
+ loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target)
1420
+ loss_prior_total += loss_prior.item()
1421
+ loss_prior *= prior_scale
1422
+ loss += loss_prior
1423
+
1424
+ recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item()
1425
+ recon_mse += mse(prior_out, clip_target).item()
1426
+
1427
+ if clip_scale>0:
1428
+ if epoch < int(mixup_pct * num_epochs):
1429
+ loss_clip = utils.mixco_nce(
1430
+ clip_voxels_norm,
1431
+ clip_target_norm,
1432
+ temp=.006,
1433
+ perm=perm, betas=betas, select=select)
1434
+ else:
1435
+ epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)]
1436
+ loss_clip = utils.soft_clip_loss(
1437
+ clip_voxels_norm,
1438
+ clip_target_norm,
1439
+ temp=epoch_temp)
1440
+
1441
+ loss_clip_total += loss_clip.item()
1442
+ loss_clip *= clip_scale
1443
+ loss += loss_clip
1444
+
1445
+ if blurry_recon:
1446
+ image_enc_pred, transformer_feats = blurry_image_enc_
1447
+
1448
+ image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215
1449
+ loss_blurry = l1(image_enc_pred, image_enc)
1450
+ loss_blurry_total += loss_blurry.item()
1451
+
1452
+ if epoch < int(mixup_pct * num_epochs):
1453
+ image_enc_shuf = image_enc[perm]
1454
+ betas_shape = [-1] + [1]*(len(image_enc.shape)-1)
1455
+ image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \
1456
+ image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)
1457
+
1458
+ image_norm = (image - mean)/std
1459
+ image_aug = (blur_augs(image) - mean)/std
1460
+ _, cnx_embeds = cnx(image_norm)
1461
+ _, cnx_aug_embeds = cnx(image_aug)
1462
+
1463
+ cont_loss = utils.soft_cont_loss(
1464
+ nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),
1465
+ nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1466
+ nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1467
+ temp=0.2)
1468
+ loss_blurry_cont_total += cont_loss.item()
1469
+
1470
+ loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215
1471
+
1472
+ if clip_scale>0:
1473
+ # forward and backward top 1 accuracy
1474
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1475
+ fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1476
+ bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1477
+
1478
+ if blurry_recon:
1479
+ with torch.no_grad():
1480
+ # only doing pixcorr eval on a subset of the samples per batch because it's costly & slow to compute autoenc.decode()
1481
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)
1482
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1)
1483
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1484
+ blurry_pixcorr += pixcorr.item()
1485
+
1486
+ utils.check_loss(loss)
1487
+ accelerator.backward(loss)
1488
+ optimizer.step()
1489
+
1490
+ losses.append(loss.item())
1491
+ lrs.append(optimizer.param_groups[0]['lr'])
1492
+
1493
+ if lr_scheduler_type is not None:
1494
+ lr_scheduler.step()
1495
+
1496
+ if train_i >= num_iterations_per_epoch-1:
1497
+ break
1498
+
1499
+ model.eval()
1500
+ logs = {}
1501
+
1502
+ if local_rank == 0:
1503
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1504
+ for i in range(2):
1505
+ for j in range(2):
1506
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1507
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1508
+ subset_dl = torch.utils.data.DataLoader(
1509
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1510
+ drop_last=True, pin_memory=True
1511
+ )
1512
+
1513
+ # Reset metrics for this subset
1514
+ test_losses = []
1515
+ test_loss_clip_total = 0
1516
+ test_loss_prior_total = 0
1517
+ test_blurry_pixcorr = 0
1518
+ test_fwd_percent_correct = 0
1519
+ test_bwd_percent_correct = 0
1520
+ test_recon_cossim = 0
1521
+ test_recon_mse = 0
1522
+
1523
+ for test_i, behav in enumerate(subset_dl):
1524
+ behav = behav[0]
1525
+ loss = 0.
1526
+
1527
+ if behav.ndim > 1:
1528
+ image = images[behav[:, 0].long().cpu()].to(device)
1529
+ voxel = vox[behav.long().cpu()].mean(1)
1530
+ else:
1531
+ image = images[behav.long().cpu()].to(device)
1532
+ voxel = vox[behav.long().cpu()]
1533
+
1534
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1535
+
1536
+ clip_img_embedder = clip_img_embedder.to(device)
1537
+ clip_target = clip_img_embedder(image.float())
1538
+
1539
+ voxel_ridge = model.ridge(voxel, 0)
1540
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1541
+
1542
+ if clip_scale > 0:
1543
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1544
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1545
+
1546
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image) // 5, replace=False)
1547
+
1548
+ if use_prior:
1549
+ loss_prior, contaminated_prior_out = model.diffusion_prior(
1550
+ text_embed=backbone[random_samps], image_embed=clip_target[random_samps])
1551
+ test_loss_prior_total += loss_prior.item()
1552
+ loss_prior *= prior_scale
1553
+ loss += loss_prior
1554
+
1555
+ if clip_scale > 0:
1556
+ loss_clip = utils.soft_clip_loss(
1557
+ clip_voxels_norm,
1558
+ clip_target_norm,
1559
+ temp=0.006
1560
+ )
1561
+ test_loss_clip_total += loss_clip.item()
1562
+ loss_clip *= clip_scale
1563
+ loss += loss_clip
1564
+
1565
+ if blurry_recon:
1566
+ image_enc_pred, _ = blurry_image_enc_
1567
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps] / 0.18215).sample / 2 + 0.5).clamp(0, 1)
1568
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1569
+ test_blurry_pixcorr += pixcorr.item()
1570
+
1571
+ if clip_scale > 0:
1572
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1573
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1574
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1575
+
1576
+ utils.check_loss(loss)
1577
+ test_losses.append(loss.item())
1578
+
1579
+ logs.update({
1580
+ f"subset_{i}_{j}_test/loss": np.mean(test_losses),
1581
+ f"subset_{i}_{j}_test/loss_clip_total": test_loss_clip_total / (test_i + 1),
1582
+ f"subset_{i}_{j}_test/loss_prior": test_loss_prior_total / (test_i + 1),
1583
+ f"subset_{i}_{j}_test/blurry_pixcorr": test_blurry_pixcorr / (test_i + 1),
1584
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1585
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1586
+ })
1587
+ print(f"--- Subset ({i},{j}) ---")
1588
+ for k, v in logs.items():
1589
+ if f"subset_{i}_{j}" in k:
1590
+ print(f"{k}: {v:.4f}")
1591
+
1592
+ # After subset loop: add train (and global test, if you want) metrics
1593
+ logs.update({
1594
+ "train/loss": np.mean(losses[-(train_i+1):]),
1595
+ "train/lr": lrs[-1],
1596
+ "train/num_steps": len(losses),
1597
+ "train/fwd_pct_correct": fwd_percent_correct / (train_i + 1),
1598
+ "train/bwd_pct_correct": bwd_percent_correct / (train_i + 1),
1599
+ "train/loss_clip_total": loss_clip_total / (train_i + 1),
1600
+ "train/loss_blurry_total": loss_blurry_total / (train_i + 1),
1601
+ "train/loss_blurry_cont_total": loss_blurry_cont_total / (train_i + 1),
1602
+ "train/blurry_pixcorr": blurry_pixcorr / (train_i + 1),
1603
+ "train/recon_cossim": recon_cossim / (train_i + 1),
1604
+ "train/recon_mse": recon_mse / (train_i + 1),
1605
+ "train/loss_prior": loss_prior_total / (train_i + 1),
1606
+ })
1607
+
1608
+
1609
+ # if finished training, save jpg recons if they exist
1610
+ if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0):
1611
+ if blurry_recon:
1612
+ image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215
1613
+ # transform blurry recon latents to images and plot it
1614
+ fig, axes = plt.subplots(1, 8, figsize=(10, 4))
1615
+ jj=-1
1616
+ for j in [0,1,2,3]:
1617
+ jj+=1
1618
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1619
+ axes[jj].axis('off')
1620
+ jj+=1
1621
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1622
+ axes[jj].axis('off')
1623
+ plt.show()
1624
+
1625
+ progress_bar.set_postfix(**logs)
1626
+
1627
+ if wandb_log: wandb.log(logs)
1628
+
1629
+ # Save model checkpoint and reconstruct
1630
+ if (ckpt_saving) and (epoch % ckpt_interval == 0):
1631
+ save_ckpt(f'last')
1632
+
1633
+ # wait for other GPUs to catch up if needed
1634
+ accelerator.wait_for_everyone()
1635
+ torch.cuda.empty_cache()
1636
+
1637
+ print("\n===Finished!===\n")
1638
+ if ckpt_saving:
1639
+ save_ckpt(f'last')
1640
+
1641
+
1642
+ # In[ ]:
1643
+
1644
+
1645
+ len(test_data)
1646
+
1647
+
1648
+ # In[ ]:
1649
+
1650
+
1651
+ # # Track metrics here:
1652
+ # https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999
1653
+
1654
+
1655
+ # **To tell whether the model is working, I look at test_fwd/bwd_pct_correct and check that it beats chance (1/batch_size).**
+ 
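# Illustrative sketch (not part of the original pipeline): "better than chance" here means
# top-1 retrieval accuracy above 1/batch_size. A quick rule of thumb is chance plus two
# binomial standard errors; all numbers below are hypothetical placeholders.
import math

batch_size_example = 24        # hypothetical test batch size
n_test_trials_example = 120    # hypothetical number of test trials
chance = 1 / batch_size_example
two_se = 2 * math.sqrt(chance * (1 - chance) / n_test_trials_example)
print(f"chance = {chance:.2%}; accuracy above ~{chance + two_se:.2%} is unlikely to be luck")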
+ # In[ ]:
1658
+
1659
+
1660
+ # MST_pairmate_names
1661
+
1662
+
1663
+ # In[ ]:
1664
+
1665
+
1666
+ x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')]
+ assert len(image_idx) == len(x)
+ pairs = []
+ for i, p in enumerate(MST_pairmate_names):
+ assert p[0] != p[1] # no duplicate images
+ pairs.append([utils.find_all_indices(x,p[0]), utils.find_all_indices(x,p[1])])
+ 
+ pairs = np.array(pairs)
+ 
+ 
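# Illustrative sketch (not part of the original pipeline): utils.find_all_indices is assumed
# to return every position at which a value occurs, like the stand-in below. This is an
# assumption about the helper, not its actual implementation.
def find_all_indices_sketch(seq, value):
    """Return all indices i such that seq[i] == value."""
    return [i for i, v in enumerate(seq) if v == value]

assert find_all_indices_sketch(['a', 'b', 'a', 'c'], 'a') == [0, 2]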
1676
+ # In[ ]:
1677
+
1678
+
1679
+ pairs.shape
1680
+
1681
+
1682
+ # In[ ]:
1683
+
1684
+
1685
+ model.eval()
1686
+ logs = {}
1687
+ if local_rank == 0:
1688
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1689
+ for i in range(2):
1690
+ for j in range(2):
1691
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1692
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1693
+ subset_dl = torch.utils.data.DataLoader(
1694
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1695
+ drop_last=True, pin_memory=True
1696
+ )
1697
+
1698
+ # Reset metrics for this subset
1699
+ test_fwd_percent_correct = 0
1700
+ test_bwd_percent_correct = 0
1701
+
1702
+ for test_i, behav in enumerate(subset_dl):
1703
+ behav = behav[0]
1704
+ loss = 0.
1705
+ image = images[behav.long().cpu()].to(device)
1706
+ voxel = vox[behav.long().cpu()]
1707
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1708
+ clip_img_embedder = clip_img_embedder.to(device)
1709
+ clip_target = clip_img_embedder(image.float())
1710
+
1711
+ voxel_ridge = model.ridge(voxel, 0)
1712
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1713
+
1714
+ clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1)
1715
+ clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1)
1716
+
1717
+ if clip_scale > 0:
1718
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1719
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1720
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1721
+ print(test_fwd_percent_correct)
1722
+ print(test_bwd_percent_correct)
1723
+ logs.update({
1724
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1725
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1726
+ })
1727
+
1728
+ print("--- Full Dataset Evaluation ---")
1729
+ for k, v in logs.items():
1730
+ print(f"{k}: {v:.4f}")
1731
+
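# Illustrative sketch (not part of the original pipeline): fwd/bwd_pct_correct above is top-1
# retrieval accuracy on a cosine-similarity matrix; row i counts as correct when its largest
# entry lies on the diagonal, so chance is 1/N. The random tensors below are stand-ins for the
# voxel-derived and CLIP image embeddings.
import torch
import torch.nn.functional as F

N, D = 8, 16                                      # hypothetical batch size and embedding dim
vox_emb = F.normalize(torch.randn(N, D), dim=-1)  # stands in for clip_voxels_norm
img_emb = F.normalize(torch.randn(N, D), dim=-1)  # stands in for clip_target_norm

sim = vox_emb @ img_emb.T                         # cosine similarities (rows are unit norm)
fwd_acc = (sim.argmax(dim=1) == torch.arange(N)).float().mean().item()    # voxel -> image
bwd_acc = (sim.T.argmax(dim=1) == torch.arange(N)).float().mean().item()  # image -> voxel
print(f"fwd={fwd_acc:.2f}, bwd={bwd_acc:.2f}, chance={1/N:.2f}")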
1732
+
1733
+ # In[ ]:
1734
+
1735
+
1736
+ # if sub=="sub-002":
1737
+ # unique_images_pairs = [
1738
+ # (2,3),(4,5),(7,8),(15,16),
1739
+ # (483, 484), (485, 486), (487, 488), (491, 492), (495, 496), (499, 500), (501, 502),
1740
+ # (503, 504), (512, 513),
1741
+ # ]
1742
+ # elif sub != 'sub-001' and session != 'ses-05':
1743
+ # unique_images_pairs = [
1744
+ # (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),
1745
+ # (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),
1746
+ # (31,32),(33,34),(35,36),
1747
+ # (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),
1748
+ # (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),
1749
+ # (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),
1750
+ # (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),
1751
+ # (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),
1752
+ # (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),
1753
+ # (847, 848), (849, 850)
1754
+ # ]
1755
+ # else:
1756
+ # # unique_images = unique_images[unique_images!='blank.jpg'][:50]
1757
+ # unique_images_pairs = find_mst_pairs(x)
1758
+ # # unique_images[unique_images_pairs]
1759
+
1760
+
1761
+ # In[ ]:
1762
+
1763
+
1764
+ import pdb
1765
+
1766
+
1767
+ # In[ ]:
1768
+
1769
+
1770
+ def evaluate_mst_pairs(mst_pairs):
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
+ failed_A = []
+ failed_B = []
+ failed_non_corr = []
+ 
+ # Get all unique image indices
+ all_indices = np.unique(mst_pairs.flatten())
+ 
+ # Pre-load all images and betas to device
+ all_images = images[image_idx[all_indices]].to(device)
+ all_voxels = torch.Tensor(vox[image_idx[all_indices]]).unsqueeze(1).to(device)
+ 
+ # Get CLIP embeddings for all images
+ all_clip_targets = clip_img_embedder(all_images.float())
+ all_clip_targets_norm = nn.functional.normalize(all_clip_targets.flatten(1), dim=-1)
+ 
+ # Pass all betas through model to get MindEye embeddings
+ all_voxel_ridge = model.ridge(all_voxels, 0)
+ _, all_clip_voxels, _ = model.backbone(all_voxel_ridge)
+ all_clip_voxels_norm = nn.functional.normalize(all_clip_voxels.flatten(1), dim=-1)
+ 
+ # Dict mapping idx (an index into the "vox" and "images" tensors) to pos (its position in the flattened array "all_indices")
+ idx_to_pos = {idx: pos for pos, idx in enumerate(all_indices)}
+ 
+ # Initialize scores
+ corr_score = 0
+ non_corr_score = 0
+ corr_total = len(mst_pairs) * 2
+ non_corr_total = len(mst_pairs) * (len(mst_pairs)-1) * 4 # off-diagonal entries of an N x N matrix = N*(N-1), counted twice each for pairmate A and B -> 4*N*(N-1)
+ 
+ 
+ # Pre-load voxelwise beta-based embeddings from MindEye and CLIP image embeddings
+ idxA = np.array([pair[0] for pair in mst_pairs])
+ idxB = np.array([pair[1] for pair in mst_pairs])
+ 
+ posA = np.array([idx_to_pos[idx] for idx in idxA])
+ posB = np.array([idx_to_pos[idx] for idx in idxB])
+ 
+ voxA_embeddings = all_clip_voxels_norm[posA]
+ voxB_embeddings = all_clip_voxels_norm[posB]
+ imgA_embeddings = all_clip_targets_norm[posA]
+ imgB_embeddings = all_clip_targets_norm[posB]
+ 
+ simA_A = utils.batchwise_cosine_similarity(voxA_embeddings, imgA_embeddings)
+ simA_B = utils.batchwise_cosine_similarity(voxA_embeddings, imgB_embeddings)
+ simB_B = utils.batchwise_cosine_similarity(voxB_embeddings, imgB_embeddings)
+ simB_A = utils.batchwise_cosine_similarity(voxB_embeddings, imgA_embeddings)
+ 
+ 
+ # corresponding 2-AFC
+ # is the voxel embedding for pairmate A of image i more similar to the CLIP embedding of pairmate A or to the CLIP embedding of pairmate B?
+ correct_A = torch.diag(simA_A) > torch.diag(simA_B)
+ # is the voxel embedding for pairmate B of image i more similar to the CLIP embedding of pairmate B or to the CLIP embedding of pairmate A?
+ correct_B = torch.diag(simB_B) > torch.diag(simB_A)
+ 
+ corr_score += correct_A.sum().item()
+ corr_score += correct_B.sum().item()
+ 
+ # Store indices where the corresponding 2-AFC fails
+ failed_A = [i for i, correct in enumerate(correct_A.cpu()) if not correct]
+ failed_B = [i for i, correct in enumerate(correct_B.cpu()) if not correct]
+ 
+ # non-corresponding 2-AFC
+ N = len(mst_pairs)
+ # Create a mask that is True for all off-diagonal elements
+ row_idx = torch.arange(N).unsqueeze(1) # (N, 1)
+ col_idx = torch.arange(N).unsqueeze(0) # (1, N)
+ off_diag_mask = row_idx != col_idx # shape (N, N)
+ 
+ diagA_A = simA_A.diag().unsqueeze(1).expand(-1, N) # expand the diagonal to (N, N): each row repeats that row's corresponding (voxel-to-own-CLIP) similarity
+ diagB_B = simB_B.diag().unsqueeze(1).expand(-1, N)
+ 
+ # pdb.set_trace()
+ 
+ # Compare each off-diagonal element to the diagonal element of its row
+ # (True where the corresponding, i.e. diagonal, similarity exceeds the off-diagonal one)
+ off_diag_mask_device = off_diag_mask.to(device)
+ 
+ fail_AA = (simA_A < diagA_A) & off_diag_mask_device
+ fail_AB = (simA_B < diagA_A) & off_diag_mask_device
+ fail_BB = (simB_B < diagB_B) & off_diag_mask_device
+ fail_BA = (simB_A < diagB_B) & off_diag_mask_device
+ 
+ non_corr_score += fail_AA.sum().item()
+ non_corr_score += fail_AB.sum().item()
+ non_corr_score += fail_BB.sum().item()
+ non_corr_score += fail_BA.sum().item()
+ 
+ # Log failed indices
+ fail_sources = [fail_AA, fail_AB, fail_BB, fail_BA]
+ for fail_matrix, label in zip(fail_sources, ["AA", "AB", "BB", "BA"]):
+ fail_coords = torch.nonzero(fail_matrix, as_tuple=False).cpu().numpy()
+ for i, j in fail_coords:
+ failed_non_corr.append({"type": label, "i": i, "j": j, "pair_i": mst_pairs[i], "pair_j": mst_pairs[j]})
+ 
+ return corr_score, corr_total, int(non_corr_score), non_corr_total, failed_A, failed_B, failed_non_corr
1866
+
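# Illustrative sketch (not part of the original pipeline): a sanity check of the trial counts
# used in evaluate_mst_pairs. With N pairs there are corr_total = 2N corresponding comparisons
# (one per pairmate) and non_corr_total = 4 * N * (N - 1) non-corresponding comparisons
# (the off-diagonal entries of four N x N similarity matrices).
def mst_trial_counts(n_pairs):
    corr_total = n_pairs * 2
    non_corr_total = n_pairs * (n_pairs - 1) * 4
    return corr_total, non_corr_total

assert mst_trial_counts(31) == (62, 3720)  # e.g. 31 pairs, matching the (31, 2) mst_pairs arrays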
1867
+
1868
+ # In[ ]:
1869
+
1870
+
1871
+ all_scores = []
+ all_failures = []
+ 
+ for i in range(4):
+ for j in range(4):
+ mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) # shape (31, 2)
+ corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs)
+ 
+ # Store scores and failure info together
+ all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total))
+ all_failures.append({
+ "repeat_A": i,
+ "repeat_B": j,
+ "failed_A": failed_A,
+ "failed_B": failed_B,
+ "failed_non_corr": failed_non_corr,
+ "mst_pairs": mst_pairs,
+ })
+ 
+ # Print summary
+ print(f"pairmate A repeat {i} vs pairmate B repeat {j}:")
+ print(f"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})")
+ print(f"2-AFC non-corresponding = {non_corr_score}/{non_corr_total} ({non_corr_score/non_corr_total:.2%})")
+ print("")
1895
+
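# Illustrative sketch (not part of the original pipeline): one way to ask whether a 2-AFC score
# beats chance (50% for a binary forced choice) is to treat the comparisons as Bernoulli trials
# and run a one-sided binomial test. The numbers below are hypothetical placeholders.
from scipy.stats import binomtest

corr_score_example, corr_total_example = 48, 62  # hypothetical corresponding 2-AFC outcome
res = binomtest(corr_score_example, corr_total_example, 0.5, alternative='greater')
print(f"{corr_score_example}/{corr_total_example} correct, p = {res.pvalue:.3g} vs 50% chance")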
1896
+
1897
+ # In[ ]:
1898
+
1899
+
1900
+ all_scores = np.array(all_scores)
+ print(f"average 2-AFC corresponding: {all_scores[:,0].mean():.2f}/{all_scores[:,1].mean():.2f} ({(all_scores[:,0].sum()/all_scores[:,1].sum()):.2%})")
+ print(f"average 2-AFC non-corresponding: {all_scores[:,2].mean():.2f}/{all_scores[:,3].mean():.2f} ({(all_scores[:,2].sum()/all_scores[:,3].sum()):.2%})")
+ print('chance = 50% for each 2-AFC comparison')
1904
+
1905
+
1906
+ # In[ ]:
1907
+
1908
+
1909
+ from collections import defaultdict
1910
+
1911
+ # Map from image index to failure details
1912
+ failed_images = defaultdict(list)
1913
+
1914
+ for failure_entry in all_failures:
1915
+ mst_pairs = failure_entry["mst_pairs"]
1916
+ i, j = failure_entry["repeat_A"], failure_entry["repeat_B"]
1917
+
1918
+ # A-side failures
1919
+ for fail_idx in failure_entry["failed_A"]:
1920
+ image_idx = mst_pairs[fail_idx][0]
1921
+ pairmate_idx = mst_pairs[fail_idx][1]
1922
+ failed_images[image_idx].append({
1923
+ "repeat_A": i,
1924
+ "repeat_B": j,
1925
+ "pairmate": pairmate_idx,
1926
+ "type": "A",
1927
+ })
1928
+
1929
+ # B-side failures
1930
+ for fail_idx in failure_entry["failed_B"]:
1931
+ image_idx = mst_pairs[fail_idx][1]
1932
+ pairmate_idx = mst_pairs[fail_idx][0]
1933
+ failed_images[image_idx].append({
1934
+ "repeat_A": i,
1935
+ "repeat_B": j,
1936
+ "pairmate": pairmate_idx,
1937
+ "type": "B",
1938
+ })
1939
+
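# Illustrative sketch (not part of the original pipeline): a compact way to inspect the failure
# log built just above, counting how often each image index failed across repeat combinations.
# It relies on the `failed_images` dict defined in the preceding cell.
from collections import Counter

failure_counts = Counter({img: len(entries) for img, entries in failed_images.items()})
for img, n_fails in failure_counts.most_common(5):
    print(f"image {img}: failed {n_fails} time(s) across repeat combinations")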
1940
+
1941
+ # In[ ]:
1942
+
1943
+
1944
+ # import matplotlib.pyplot as plt
1945
+
1946
+ # for img_idx, failure_list in failed_images.items():
1947
+ # print(f"\n==== Failed Image {img_idx} ====")
1948
+
1949
+ # # Load and normalize the embeddings
1950
+ # image = images[img_idx].unsqueeze(0).to(device).float()
1951
+ # image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)
1952
+
1953
+ # # Get voxel→CLIP embedding
1954
+ # voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)
1955
+ # voxel_embed = model.backbone(model.ridge(voxel, 0))[1]
1956
+ # voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)
1957
+
1958
+ # # Display original image
1959
+ # print("Original image:")
1960
+ # display(utils.torch_to_Image(images[img_idx]))
1961
+
1962
+ # # Collect unique pairmates involved in the failure
1963
+ # pairmate_indices = list(set(entry["pairmate"] for entry in failure_list))
1964
+
1965
+ # # Plot failed pairmates with similarity annotations
1966
+ # fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
1967
+ # if len(pairmate_indices) == 1:
1968
+ # axs = [axs]
1969
+
1970
+ # # Compute "correct" similarity — voxel to its own CLIP embedding
1971
+ # correct_clip = image_clip.float()
1972
+ # correct_voxel_sim = (voxel_embed.float() @ correct_clip.T).item()
1973
+ # print(f"Correct voxel→CLIP similarity = {correct_voxel_sim:.4f}")
1974
+
1975
+ # # Plot failed pairmates with similarity annotations
1976
+ # fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
1977
+ # if len(pairmate_indices) == 1:
1978
+ # axs = [axs]
1979
+
1980
+ # for ax, mate_idx in zip(axs, pairmate_indices):
1981
+ # mate_image = images[mate_idx].unsqueeze(0).to(device).float()
1982
+ # mate_clip = nn.functional.normalize(clip_img_embedder(mate_image).flatten(1), dim=-1).float()
1983
+
1984
+ # # Similarities
1985
+ # clip_sim = (correct_clip @ mate_clip.T).item()
1986
+ # voxel_sim = (voxel_embed.float() @ mate_clip.T).item()
1987
+
1988
+ # # Check if this was the mistaken "higher" match
1989
+ # wrong_match = voxel_sim > correct_voxel_sim
1990
+
1991
+ # # Plot image and annotate
1992
+ # ax.imshow(utils.torch_to_Image(images[mate_idx]))
1993
+ # ax.axis("off")
1994
+ # ax.set_title(f"Pairmate {mate_idx}\nCLIP={clip_sim:.3f}\nVoxel={voxel_sim:.3f}\n{'← WRONG' if wrong_match else ''}",
1995
+ # color="red" if wrong_match else "black")
1996
+
1997
+
1998
+ # plt.tight_layout()
1999
+ # plt.show()
2000
+
2001
+
2002
+ # In[ ]:
2003
+
2004
+
2005
+ # comp[20,18] is the only False
2006
+
2007
+
2008
+ # In[ ]:
2009
+
2010
+
2011
+ # import matplotlib.pyplot as plt
2012
+
2013
+ # for img_idx, failure_list in failed_images.items():
2014
+ # print(f"\n==== Failed Image {img_idx} ====")
2015
+
2016
+ # # Load and normalize the embeddings
2017
+ # image = images[img_idx].unsqueeze(0).to(device).float()
2018
+ # image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)
2019
+
2020
+ # # Get voxel→CLIP embedding
2021
+ # voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)
2022
+ # voxel_embed = model.backbone(model.ridge(voxel, 0))[1]
2023
+ # voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)
2024
+
2025
+ # # Display original image
2026
+ # print("Original image:")
2027
+ # display(utils.torch_to_Image(images[img_idx]))
2028
+
2029
+ # # Collect unique pairmates involved in the failure
2030
+ # pairmate_indices = list(set(entry["pairmate"] for entry in failure_list))
2031
+
2032
+ # # Plot failed pairmates with similarity annotations
2033
+ # fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
2034
+ # if len(pairmate_indices) == 1:
2035
+ # axs = [axs]
2036
+
2037
+ # for ax, mate_idx in zip(axs, pairmate_indices):
2038
+ # # Get all CLIP embeddings for failed image and pairmates
2039
+ # all_indices = [img_idx] + pairmate_indices
2040
+ # all_images = images[all_indices].to(device).float()
2041
+ # all_clip_embeds = clip_img_embedder(all_images)
2042
+ # all_clip_embeds = nn.functional.normalize(all_clip_embeds.flatten(1), dim=-1).float()
2043
+
2044
+ # # Compare voxel embedding for the failed image to all CLIP embeddings
2045
+ # sims = (voxel_embed.float() @ all_clip_embeds.T).squeeze().cpu().detach().numpy() # shape: (1, N) → (N,)
2046
+ # image_ids = ["correct"] + [f"pairmate {idx}" for idx in pairmate_indices]
2047
+
2048
+ # # Sort and display
2049
+ # sorted_sims = sorted(zip(image_ids, all_indices, sims), key=lambda x: -x[2])
2050
+
2051
+ # print("\n🧠 Voxel→CLIP similarity ranking:")
2052
+ # for label, idx, sim in sorted_sims:
2053
+ # print(f"{label:12} (index {idx:3}): similarity = {sim:.4f}")
2054
+
2055
+ # # Optional assertion: did any pairmate score higher than the correct image?
2056
+ # correct_sim = sims[0]
2057
+ # higher = [(label, sim) for label, _, sim in sorted_sims[1:] if sim > correct_sim]
2058
+ # if higher:
2059
+ # print("\n❌ Mismatch detected: voxel embedding matched other images more than the correct one!")
2060
+ # else:
2061
+ # print("\n✅ Model correctly ranked the correct image highest (despite failure elsewhere)")
2062
+
2063
+ # plt.tight_layout()
2064
+ # plt.show()
2065
+
2066
+
2067
+ # In[ ]:
2068
+
2069
+
2070
+ mst_pairs[:5]
2071
+
2072
+
2073
+ # In[ ]:
2074
+
2075
+
2076
+ pairs[0]
2077
+
2078
+
2079
+ # In[ ]:
2080
+
2081
+
2082
+ # images[image_idx[pairs[0][0]]].shape
2083
+
2084
+
2085
+ # In[ ]:
2086
+
2087
+
2088
+ ix = 0
2089
+ display(utils.torch_to_Image(images[pairs[ix][0]]))
2090
+ display(utils.torch_to_Image(images[pairs[ix][1]]))
2091
+
2092
+
2093
+ # In[ ]:
2094
+
2095
+
2096
+ # print(np.allclose(embed_A[0], embed_A[1])) # across repeats
2097
+ # print(np.allclose(embed_A[0], embed_B[0])) # across pairmates
2098
+
2099
+
2100
+ # In[ ]:
2101
+
2102
+
2103
+ # def generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=2):
2104
+ # n_imgs, n_pairmates, n_repeats = pairs.shape
2105
+ # nonmatch_pairs = []
2106
+
2107
+ # for i in range(n_imgs):
2108
+ # other_idxs = [j for j in range(n_imgs) if j != i]
2109
+ # sampled_j = np.random.choice(other_idxs, size=num_images_per_source, replace=False)
2110
+
2111
+ # for j in sampled_j:
2112
+ # for _ in range(num_repeats):
2113
+ # a_side = np.random.randint(2)
2114
+ # b_side = np.random.randint(2)
2115
+ # a_repeat = np.random.randint(n_repeats)
2116
+ # b_repeat = np.random.randint(n_repeats)
2117
+
2118
+ # pair_a = pairs[i, a_side, a_repeat]
2119
+ # pair_b = pairs[j, b_side, b_repeat]
2120
+ # nonmatch_pairs.append([pair_a, pair_b])
2121
+
2122
+ # return np.array(nonmatch_pairs)
2123
+
2124
+
2125
+ # In[ ]:
2126
+
2127
+
2128
+ # nonmatch_pairs = generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=1)
2129
+ # results = evaluate_mst_pairs(nonmatch_pairs)
2130
+ # print(results)
2131
+
2132
+
2133
+ # In[ ]:
2134
+
2135
+
2136
+ # # Compare first few pairs
2137
+ # for pair in pairs: # Checking first 2 pairs
2138
+ # print("Indices in mst_pairs:", pair)
2139
+ # print("Corresponding filenames:")
2140
+ # print(f"Image 1: {x[pair[0]]}")
2141
+ # print(f"Image 2: {x[pair[1]]}\n")
2142
+
2143
+
2144
+ # In[ ]:
2145
+
2146
+
2147
+ # for i in range(len(pairs)):
2148
+ # fig, ax = plt.subplots(1, 2, figsize=(10,8))
2149
+
2150
+ # ax[0].imshow(images[pairs[i][0]].permute(1,2,0).numpy())
2151
+ # ax[0].set_title(f"Repeat 1")
2152
+
2153
+ # ax[1].imshow(images[pairs[i][1]].permute(1,2,0).numpy())
2154
+ # ax[1].set_title(f"Repeat 2")
2155
+
2156
+ # plt.setp(ax, xticks=[], yticks=[])
2157
+ # plt.tight_layout()
2158
+ # plt.show()
2159
+
2160
+
2161
+ # In[ ]:
2162
+
2163
+
2164
+ # score = 0
2165
+ # total = 0
2166
+ # with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
2167
+ # for pair in unique_images_pairs:
2168
+ # imageA_idx, imageB_idx = pair
2169
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
2170
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
2171
+
2172
+ # voxel = vox[imageA_idx].to(device)[None]
2173
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
2174
+
2175
+ # imageA = images[imageA_idx].to(device)[None]
2176
+ # imageB = images[imageB_idx].to(device)[None]
2177
+
2178
+ # clip_targetA = clip_img_embedder(imageA.float())
2179
+ # clip_targetB = clip_img_embedder(imageB.float())
2180
+
2181
+ # voxel_ridge = model.ridge(voxel,0)
2182
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
2183
+
2184
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
2185
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
2186
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
2187
+
2188
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
2189
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
2190
+
2191
+ # if cossimA > cossimB:
2192
+ # score += 1
2193
+ # total += 1
2194
+
2195
+ # for pair in unique_images_pairs:
2196
+ # imageA_idx, imageB_idx = pair
2197
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
2198
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
2199
+
2200
+ # voxel = vox[imageB_idx].to(device)[None]
2201
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
2202
+
2203
+ # imageA = images[imageA_idx].to(device)[None]
2204
+ # imageB = images[imageB_idx].to(device)[None]
2205
+
2206
+ # clip_targetA = clip_img_embedder(imageA.float())
2207
+ # clip_targetB = clip_img_embedder(imageB.float())
2208
+
2209
+ # voxel_ridge = model.ridge(voxel,0)
2210
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
2211
+
2212
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
2213
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
2214
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
2215
+
2216
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
2217
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
2218
+
2219
+ # if cossimB > cossimA:
2220
+ # score += 1
2221
+ # total += 1
2222
+
2223
+ # print(score/total)
2224
+
2225
+
2226
+ # In[ ]:
2227
+
2228
+
2229
+ #display(utils.torch_to_Image(imageA))
2230
+ #display(utils.torch_to_Image(imageB))
2231
+
2232
+
2233
+ # In[ ]:
2234
+
2235
+
2236
+ # from scipy.stats import binomtest
2237
+
2238
+ # total_samples = len(np.array(unique_images_pairs).flatten())
2239
+ # assert total_samples == 100
2240
+
2241
+ # correct_predictions = int((score/total) * total_samples) # calculate the number of correct predictions
2242
+ # expected_accuracy = 0.5 # expected accuracy under the null hypothesis
2243
+
2244
+ # # Perform the binomial test
2245
+ # binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')
2246
+ # p_value = binom_stats.pvalue
2247
+
2248
+ # # Output the result
2249
+ # print(f"P-value: {p_value}")
2250
+ # if p_value < 0.05:
2251
+ # print("The decoder's accuracy is significantly better than chance.")
2252
+ # else:
2253
+ # print("The decoder's accuracy is not significantly better than chance.")
2254
+
2255
+
2256
+ # In[ ]:
2257
+
2258
+
2259
+
2260
+
main-finetune.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
main-finetune.py ADDED
@@ -0,0 +1,2280 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Import packages & functions
5
+
6
+ # In[1]:
7
+
8
+
9
+ print("importing modules")
10
+ import os
11
+ import sys
12
+ import json
13
+ import argparse
14
+ import numpy as np
15
+ import time
16
+ import random
17
+ import string
18
+ import h5py
19
+ from tqdm import tqdm
20
+ import webdataset as wds
21
+ from PIL import Image
22
+ import pandas as pd
23
+ import nibabel as nib
24
+ import nilearn
25
+
26
+ import matplotlib.pyplot as plt
27
+ import torch
28
+ import torch.nn as nn
29
+ from torchvision import transforms
30
+
31
+ # tf32 data type is faster than standard float32
32
+ torch.backends.cuda.matmul.allow_tf32 = True
33
+
34
+ import utils
35
+ from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas
36
+
37
+ # imports utils from mindeye_preproc as "preproc"
38
+ import importlib.util
39
+ parent_utils_path = "/home/ri4541/mindeye_preproc/analysis/utils.py"
40
+ spec = importlib.util.spec_from_file_location("utils", parent_utils_path)
41
+ preproc = importlib.util.module_from_spec(spec)
42
+ parent_dir = os.path.dirname(parent_utils_path)
43
+ if parent_dir not in sys.path:
44
+ sys.path.append(parent_dir)
45
+ spec.loader.exec_module(preproc)
46
+
47
+ if utils.is_interactive():
48
+ from IPython.display import clear_output # function to clear print outputs in cell
49
+ get_ipython().run_line_magic('load_ext', 'autoreload')
50
+ # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions
51
+ get_ipython().run_line_magic('autoreload', '2')
52
+
53
+ seed = utils.get_slurm_seed()
54
+
55
+
56
+ # # Princeton data prep
57
+
58
+ # ## Load Data & Design
59
+
60
+ # In[2]:
61
+
62
+
63
+ if utils.is_interactive():
64
+ sub = "sub-005"
65
+ session = "all"
66
+ task = 'C' # e.g. 'study', 'A', or 'C'; used to search for the functional run in BIDS format
67
+ func_task_name = 'C'
68
+ else:
69
+ sub = os.environ["sub"]
70
+ session = os.environ["session"]
71
+ task = os.environ["task"]
72
+ func_task_name = 'C'
73
+
74
+ if session == "all":
75
+ ses_list = ["ses-01", "ses-02", "ses-03"] # list of actual session IDs
76
+ design_ses_list = ["ses-01", "ses-02", "ses-03"] # list of session IDs to search for design matrix
77
+ else:
78
+ ses_list = [session]
79
+ design_ses_list = [session]
80
+
81
+ task_name = f"_task-{task}" if task != 'study' else ''
82
+ resample_voxel_size = False
83
+ resample_post_glmsingle = False # do you want to do voxel resampling here? if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal
84
+ load_from_resampled_file = False # do you want to load resampled data from file? if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data
85
+
86
+ train_test_split = 'MST' # 'MST', 'orig', 'unique'
87
+ remove_close_to_MST = False
88
+ remove_random_n = False
89
+
90
+ if remove_close_to_MST or remove_random_n:
91
+ assert remove_close_to_MST != remove_random_n # don't remove both sets of images
92
+
93
+ n_to_remove = 0
94
+ if remove_random_n:
95
+ assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set
96
+ n_to_remove = 150
97
+
98
+ if resample_voxel_size:
99
+ # voxel size was unchanged in glmsingle, want to perform resampling here
100
+ resampled_vox_size = 2.5
101
+ resample_method = "sinc" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html
102
+
103
+ # file name helper variables
104
+ vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore
105
+ resampled_suffix = f"resampled_{vox_dim_str}mm_{resample_method}"
106
+ mask_resampled_suffix = resampled_suffix
107
+ if resample_post_glmsingle:
108
+ resampled_suffix += '_postglmsingle'
109
+ else:
110
+ resampled_suffix += '_preglmsingle'
111
+
112
+
113
+ # In[3]:
114
+
115
+
116
+ session_label = preproc.get_session_label(ses_list)
117
+ print('session label:', session_label)
118
+ n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)
119
+
120
+
121
+ # In[4]:
122
+
123
+
124
+ if utils.is_interactive():
125
+ glmsingle_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}"
126
+ else:
127
+ glmsingle_path = os.environ["glmsingle_path"]
128
+
129
+ designdir = "/home/ri4541/real_time_mindEye2"
130
+ print(glmsingle_path)
131
+
132
+ if resample_voxel_size:
133
+ # option 1: we are using original (non-resampled) GLMsingle outputs and doing the resampling here
134
+ # option 2: doing resampling pre-GLMsingle and using those outputs; no resampling involved here
135
+ if resample_post_glmsingle:
136
+ # option 1
137
+ orig_glmsingle_path = glmsingle_path
138
+ glmsingle_path += f"_{resampled_suffix}"
139
+ print("resampled glmsingle path:", glmsingle_path)
140
+ if load_from_resampled_file:
141
+ # resampling is already done; load from file
142
+ assert os.path.exists(glmsingle_path) # the new directory must have been created if we reached here
143
+ else:
144
+ # don't load from file; do resampling here
145
+ os.makedirs(glmsingle_path,exist_ok=True)
146
+ else:
147
+ # option 2
148
+ glmsingle_path += f"_{resampled_suffix}"
149
+ print("glmsingle path:", glmsingle_path)
150
+
151
+ assert os.path.exists(glmsingle_path)
152
+ print("glmsingle path exists!")
153
+
154
+
155
+ # In[5]:
156
+
157
+
158
+ data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files(
159
+ sub=sub,
160
+ session=session,
161
+ func_task_name=task,
162
+ designdir=designdir,
163
+ design_ses_list=design_ses_list
164
+ )
165
+
166
+ if sub == 'sub-001':
167
+ if session == 'ses-01':
168
+ assert image_names[0] == 'images/image_686_seed_1.png'
169
+ elif session in ('ses-02', 'all'):
170
+ assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'
171
+ elif session == 'ses-03':
172
+ assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'
173
+ elif session == 'ses-04':
174
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
175
+ elif sub == 'sub-003':
176
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
177
+
178
+ unique_images = np.unique(image_names.astype(str))
179
+ unique_images = unique_images[(unique_images!="nan")]
180
+ len_unique_images = len(unique_images)
181
+ print("n_runs",n_runs)
182
+
183
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
184
+ assert len(unique_images) == 851
185
+
186
+ print(image_names[:4])
187
+ print(starts[:4])
188
+ print(is_new_run[:4])
189
+
190
+ if remove_random_n:
191
+ # want to remove 150 imgs
192
+ # 100 special515 imgs are repeated 3x (300 total)
193
+ # all other train imgs are only shown once (558 total)
194
+ # of the 150, want to sample proportionally since we're cutting all repeats for special515
195
+ # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total
196
+ np.random.seed(seed)
197
+ options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)
198
+ options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)
199
+ imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False)
200
+ imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))
201
+
202
+ image_idx = np.array([]) # contains the unique index of each presented image
203
+ vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx
204
+ all_MST_images = dict()
205
+ for i, im in enumerate(image_names):
206
+ # skip if blank, nan
207
+ if im == "blank.jpg":
208
+ i+=1
209
+ continue
210
+ if str(im) == "nan":
211
+ i+=1
212
+ continue
213
+ vox_image_names = np.append(vox_image_names, im)
214
+ if remove_close_to_MST: # optionally skip close_to_MST images
215
+ if "closest_pairs" in im:
216
+ i+=1
217
+ continue
218
+ elif remove_random_n:
219
+ if im in imgs_to_remove:
220
+ i+=1
221
+ continue
222
+
223
+ image_idx_ = np.where(im==unique_images)[0].item()
224
+ image_idx = np.append(image_idx, image_idx_)
225
+
226
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles
227
+ import re
228
+ if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im) or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)):
229
+ # the regexp here looks for **_***.png, allows 1-2 chars before underscore and 1-3 chars after it
230
+ # print(im)
231
+ all_MST_images[i] = im
232
+ i+=1
233
+ elif 'MST' in im:
234
+ all_MST_images[i] = im
235
+ i+=1
236
+
237
+ image_idx = torch.Tensor(image_idx).long()
238
+ # for im in new_image_names[MST_images]:
239
+ # assert 'MST_pairs' in im
240
+ # assert len(all_MST_images) == 300
241
+
242
+ unique_MST_images = np.unique(list(all_MST_images.values()))
243
+
244
+ MST_ID = np.array([], dtype=int)
245
+ if remove_close_to_MST:
246
+ close_to_MST_idx = np.array([], dtype=int)
247
+ if remove_random_n:
248
+ random_n_idx = np.array([], dtype=int)
249
+
250
+ vox_idx = np.array([], dtype=int)
251
+ j=0 # this is a counter keeping track of the remove_random_n used later to index vox based on the removed images; unused otherwise
252
+ for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure
253
+ # skip if blank, nan
254
+ if im == "blank.jpg":
255
+ i+=1
256
+ continue
257
+ if str(im) == "nan":
258
+ i+=1
259
+ continue
260
+ if remove_close_to_MST: # optionally skip close_to_MST images
261
+ if "closest_pairs" in im:
262
+ close_to_MST_idx = np.append(close_to_MST_idx, i)
263
+ i+=1
264
+ continue
265
+ if remove_random_n:
266
+ if im in imgs_to_remove:
267
+ vox_idx = np.append(vox_idx, j)
268
+ i+=1
269
+ j+=1
270
+ continue
271
+ j+=1
272
+ curr = np.where(im == unique_MST_images)
273
+ # print(curr)
274
+ if curr[0].size == 0:
275
+ MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later
276
+ else:
277
+ MST_ID = np.append(MST_ID, curr)
278
+
279
+ assert len(MST_ID) == len(image_idx)
280
+ # assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)
281
+ # MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor
282
+ print(MST_ID.shape)
283
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
284
+ assert len(all_MST_images) == 100
285
+
286
+
287
+ # ## Load images
288
+
289
+ # In[ ]:
290
+
291
+
292
+ import imageio.v2 as imageio
293
+ resize_transform = transforms.Resize((224, 224))
294
+ MST_images = []
295
+ images = None
296
+ for im_name in tqdm(image_idx):
297
+ if sub == 'sub-001' and session == 'ses-01':
298
+ image_file = f"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}"
299
+ else:
300
+ image_file = f"{unique_images[im_name]}"
301
+ im = imageio.imread(image_file)
302
+ im = torch.Tensor(im / 255).permute(2,0,1)
303
+ im = resize_transform(im.unsqueeze(0))
304
+ if images is None:
305
+ images = im
306
+ else:
307
+ images = torch.vstack((images, im))
308
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
309
+ if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\d{1,2}_\d{1,3}\.png$', image_file)):
310
+ MST_images.append(True)
311
+ else:
312
+ MST_images.append(False)
313
+ else:
314
+ if ("MST_pairs" in image_file): # ("_seed_" not in unique_images[im_name]) and (unique_images[im_name] != "blank.jpg")
315
+ MST_images.append(True)
316
+ else:
317
+ MST_images.append(False)
318
+
319
+ print("images", images.shape)
320
+ MST_images = np.array(MST_images)
321
+ print("MST_images", len(MST_images))
322
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
323
+ assert len(MST_images[MST_images==True]) == 100
324
+ print("MST_images==True", len(MST_images[MST_images==True]))
325
+
326
+
327
+ # In[ ]:
328
+
329
+
330
+ # want IDs of pairmates based on MST_images
331
+ # create "MST_pairmates" which is a 25x2 array with indices of the 25 pairs based on MST_images == True
332
+
333
+ assert unique_MST_images.shape[0] % 2 == 0 # make sure it's divisible by 2
334
+ MST_pairmate_names = unique_MST_images.reshape(int(unique_MST_images.shape[0]/2),2)
335
+ # print(MST_pairmate_names)
336
+
337
+ MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int)
338
+ for p, pair in enumerate(MST_pairmate_names):
339
+ for i, im in enumerate(pair):
340
+ MST_pairmate_indices[p][i] = np.where(np.isin(list(all_MST_images.values()), im))[0][0] # just take the first repeated instance of an image
341
+
342
+ print(MST_pairmate_indices.shape, MST_pairmate_indices)
343
+
344
+
345
+ # In[ ]:
346
+
347
+
348
+ if (sub == 'sub-001' and session in ('ses-02', 'ses-03', 'all')):
349
+ # MST_pairs contains the indices of repeats based on all_MST_images
350
+ # all_MST_images contains the indices of images from image_names
351
+ MST_pairs = utils.find_paired_indices(torch.tensor(MST_ID))
352
+ MST_pairs = np.array(sorted(MST_pairs[:-1], key=lambda x: x[0])) # we added a fake value as a placeholder so index out the last group of pairs
353
+
354
+ # assert images[MST_pairs]
355
+
356
+ fig, ax = plt.subplots(1, 3, figsize=(10,4))
357
+ fig.suptitle('Sample MST pairs')
358
+
359
+ ax[0].imshow(images[MST_pairs[-1][0]].permute(1,2,0).numpy())
360
+ ax[0].set_title(f"Trial 0")
361
+
362
+ ax[1].imshow(images[MST_pairs[-1][1]].permute(1,2,0).numpy())
363
+ ax[1].set_title(f"Trial 1")
364
+
365
+ ax[2].imshow(images[MST_pairs[-1][2]].permute(1,2,0).numpy())
366
+ ax[2].set_title(f"Trial 2")
367
+
368
+ plt.setp(ax, xticks=[], yticks=[])
369
+ plt.tight_layout()
370
+ plt.show()
371
+
372
+
373
+ # In[ ]:
374
+
375
+
376
+ # pairs has the indices of all repeated images
377
+ pairs = utils.find_paired_indices(image_idx)
378
+ pairs = sorted(pairs, key=lambda x: x[0])
379
+
380
+ fig, axes = plt.subplots(1, 3, figsize=(6, 2)) # 1 row, 3 columns
381
+ for i, ax in enumerate(axes):
382
+ ax.imshow(images[i].permute(1, 2, 0).numpy())
383
+ ax.set_title(f"Trial {i}")
384
+ ax.axis("off") # Hide axes for better visualization
385
+
386
+ plt.tight_layout()
387
+ # output_path = os.path.join(output_dir, "trials_plot.png")
388
+ # plt.savefig(output_path, dpi=300) # Save figure
389
+ plt.show()
390
+
391
+
392
+ # In[ ]:
393
+
394
+
395
+ p=0
396
+
397
+ # plot 2 repeats (anything in pairs should have 2 repeats, even if there's more)
398
+ fig, ax = plt.subplots(1, 2, figsize=(10,8))
399
+
400
+ ax[0].imshow(images[pairs[p][0]].permute(1,2,0).numpy())
401
+ ax[0].set_title(f"Repeat 1")
402
+
403
+ ax[1].imshow(images[pairs[p][1]].permute(1,2,0).numpy())
404
+ ax[1].set_title(f"Repeat 2")
405
+
406
+ plt.setp(ax, xticks=[], yticks=[])
407
+ plt.tight_layout()
408
+ plt.show()
409
+
410
+
411
+ # In[ ]:
412
+
413
+
414
+ def get_image_pairs(sub, session, func_task_name, designdir):
415
+ """Loads design files and processes image pairs for a given session."""
416
+ _, _, _, _, image_names, unique_images, _ = preproc.load_design_files(
417
+ sub=sub,
418
+ session=session,
419
+ func_task_name=func_task_name,
420
+ designdir=designdir,
421
+ design_ses_list=[session] # Ensure it's a list
422
+ )
423
+ return utils.process_images(image_names, unique_images)
424
+
425
+
426
+ # In[ ]:
427
+
428
+
429
+ from collections import defaultdict
430
+
431
+ all_dicts = []
432
+ for s_idx, s in enumerate(ses_list):
433
+ im, vo, _ = get_image_pairs(sub, s, func_task_name, designdir)
434
+ assert len(im) == len(vo)
435
+ all_dicts.append({k:v for k,v in enumerate(vo)})
436
+
437
+ # for the train set (ses-01-02 non-MST)
438
+ image_to_indices = defaultdict(lambda: [[] for _ in range(len(ses_list))])
439
+ for ses_idx, idx_to_name in enumerate(all_dicts):
440
+ for idx, name in idx_to_name.items():
441
+ image_to_indices[name][ses_idx].append(idx)
442
+
443
+ image_to_indices = dict(image_to_indices)
444
+
445
+ # for the test set (ses-03)
446
+ # test_image_to_indices = defaultdict(lambda: [[] for _ in range(len([ses_list[-1]]))])
447
+ # for ses_idx, idx_to_name in enumerate([all_dicts[-1]]):
448
+ # for idx, name in idx_to_name.items():
449
+ # test_image_to_indices[name][ses_idx].append(idx)
450
+
451
+ # test_image_to_indices = dict(test_image_to_indices)
452
+
453
+ if sub == 'sub-005' and len(ses_list) > 1:
454
+ session_length = 693
455
+ for image, session_indices_list in image_to_indices.items():
456
+ new_indices_list = []
457
+ for idx, indices in enumerate(session_indices_list):
458
+ offset = idx * session_length
459
+ new_indices = [i + offset for i in indices]
460
+ new_indices_list.append(new_indices)
461
+ image_to_indices[image] = new_indices_list
462
+
463
+ import itertools
464
+ assert max(itertools.chain.from_iterable(list(image_to_indices.values())))[0] == (len(ses_list)*session_length) - 1
465
+
466
+
467
+ # In[ ]:
468
+
469
+
470
+ if resample_voxel_size:
471
+ from nilearn.masking import apply_mask, unmask
472
+ ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz'
473
+ omat_name = f'{glmsingle_path}/boldref_omat'
474
+
475
+
476
+ # In[ ]:
477
+
478
+
479
+ from nilearn.plotting import plot_roi
480
+
481
+ print('loading brain mask')
482
+ avg_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_brain.nii.gz')
483
+ final_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_mask.nii.gz')
484
+
485
+ # mask info
486
+ dimsize=avg_mask.header.get_zooms()
487
+ affine_mat = avg_mask.affine
488
+ brain=avg_mask.get_fdata()
489
+ xyz=brain.shape #xyz dimensionality of brain mask and epi data
490
+
491
+ print('Mask dimensions:', dimsize)
492
+ print('')
493
+ print('Affine:')
494
+ print(affine_mat)
495
+ print('')
496
+ print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n')
497
+
498
+ plot_roi(final_mask, bg_img=avg_mask)
499
+ plt.show()
500
+
501
+
502
+ # In[ ]:
503
+
504
+
505
+ # # create union of ses-01 and ses-02 reliability masks and plot against avg_mask
506
+ # rel_masks = []
507
+ # rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-01_to_ses-03.npy'))
508
+ # rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-02_to_ses-03.npy'))
509
+ # rel_masks = np.array(rel_masks)
510
+ # for r in rel_masks:
511
+ # assert r.shape[0] == int(final_mask.get_fdata().sum())
512
+ # assert r.dtype == bool
513
+
514
+ # assert len(rel_masks) == 2 # should be the case if there's 2 training sessions
515
+ # union_mask = np.logical_or(rel_masks[0], rel_masks[1])
516
+ # assert union_mask.sum() > rel_masks[0].sum()
517
+ # assert union_mask.sum() > rel_masks[1].sum()
518
+ # print(f'there are {union_mask.sum()} reliable voxels based on the union mask out of {int(final_mask.get_fdata().sum())} voxels in the nsdgeneral roi')
519
+ # print(f'{(union_mask.sum() / int(final_mask.get_fdata().sum())):.2%} of the voxels in the roi were selected')
520
+ # path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy'
521
+ path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_ses-01-02.npy'
522
+ # np.save(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy', union_mask)
523
+ # print(f'saved union mask to {path}!')
524
+ union_mask = np.load(path)
525
+
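# Illustrative sketch (not part of the original pipeline): per the commented block above, the
# union mask is the logical OR of boolean per-session reliability masks defined over the final
# ROI. A minimal version of that step:
import numpy as np

def build_union_mask(rel_masks):
    """OR together boolean masks of shape (n_sessions, n_roi_voxels)."""
    rel_masks = np.asarray(rel_masks, dtype=bool)
    return np.logical_or.reduce(rel_masks, axis=0)

# e.g. union = build_union_mask([rel_mask_ses01, rel_mask_ses02]); union.sum() reliable voxels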
526
+
527
+ # ## Load GLMSingle voxel data
528
+
529
+ # In[ ]:
530
+
531
+
532
+ ses_mask = []
533
+
534
+ for s in ses_list:
535
+ ses_mask_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C/sub-005_{s}_task-C_brain.nii.gz'
536
+ ses_mask.append(nib.load(ses_mask_path))
537
+
538
+ assert np.all(ses_mask[-1].affine == final_mask.affine)
539
+ assert np.all(ses_mask[-1].shape == final_mask.shape)
540
+
541
+
542
+ # In[ ]:
543
+
544
+
545
+ ses_vox = []
546
+ vox = None
547
+ needs_postprocessing = False
548
+ params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx)
549
+
550
+ if resample_post_glmsingle == True:
551
+ glm_save_path_resampled = f"{glmsingle_path}/vox_resampled.nii.gz"
552
+ if load_from_resampled_file == True:
553
+ # resampling was done in this notebook so we can load from file
554
+ vox = nib.load(glm_save_path_resampled)
555
+ else:
556
+ # do resampling here
557
+ assert os.path.exists(ref_name) and os.path.exists(omat_name), "need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs"
558
+ vox = load_preprocess_betas(orig_glmsingle_path, *params)
559
+ vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name)
560
+ needs_postprocessing = True
561
+
562
+ if vox is None:
563
+ for i, s in enumerate(ses_list):
564
+ # either resampling was done in glmsingle or we aren't resampling
565
+ ses_vox_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C'
566
+ assert os.path.exists(ses_vox_path)
567
+ ses_vox.append(load_preprocess_betas(ses_vox_path, *params))
568
+ v = nilearn.masking.unmask(ses_vox[i], ses_mask[i])
569
+ ses_vox[i] = nilearn.masking.apply_mask(v, final_mask)
570
+ vox = np.concatenate(ses_vox)
571
+ print("applied final brain mask")
572
+ print(vox.shape)
573
+ vox = vox[:, union_mask]
574
+ print("applied union roi mask")
575
+ print(vox.shape)
576
+
577
+
578
+ if needs_postprocessing == True:
579
+ vox = apply_mask(vox, avg_mask)
580
+ vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images)
581
+ print(vox.shape)
582
+
583
+ assert len(vox) == len(image_idx)
584
+
585
+
586
+ # In[ ]:
587
+
588
+
589
+ # # get vox into the same shape as the union mask
590
+ # v = nilearn.masking.unmask(vox, ses_mask) # move back to 3D based on own session mask
591
+ # final_mask = nilearn.masking.intersect_masks([avg_mask, roi])
592
+ # vox = nilearn.masking.apply_mask(vox, final_mask) # re-flatten based on final mask so everything is in the same shape now
593
+ # print(vox.shape)
594
+
595
+
596
+ # In[ ]:
597
+
598
+
599
+ pairs_homog = np.array([[p[0], p[1]] for p in pairs])
600
+
601
+
602
+ # In[ ]:
603
+
604
+
605
+ same_corrs = []
606
+ diff_corrs = []
607
+ for isamp, samp in enumerate(vox[pairs_homog]):
608
+ avg_same_img = []
609
+ for i in range(samp.shape[0]):
610
+ for j in range(i, samp.shape[0]):
611
+ if i != j:
612
+ avg_same_img.append(np.array([np.corrcoef(samp[i, :], samp[j, :])[0,1]]))
613
+
614
+ same_corrs.append(np.mean(avg_same_img))
615
+
616
+ avg_diff_img = []
617
+ for isamp_j, samp_j in enumerate(vox[pairs_homog]):
618
+ if isamp_j != isamp:
619
+ for i in range(samp_j.shape[0]):
620
+ for j in range(i, samp_j.shape[0]):
621
+ if i != j:
622
+ avg_diff_img.append(np.array([np.corrcoef(samp[i, :], samp_j[j, :])[0,1]]))
623
+
624
+ # print(len(avg_diff_img))
625
+ diff_corrs.append(np.mean(avg_diff_img))
626
+
627
+
628
+ print(len(same_corrs), len(diff_corrs))
629
+ same_corrs = np.array(same_corrs)
630
+ diff_corrs = np.array(diff_corrs)
631
+
632
+
633
+ plt.figure(figsize=(5,4))
634
+ plt.title(f"{sub}_{session} same/diff Pearson corr.")
635
+ plt.plot(np.sort(same_corrs),c='blue',label='same')
636
+ plt.plot(np.sort(diff_corrs),c='cyan',label='diff')
637
+ plt.axhline(0,c='k',ls='--')
638
+ plt.legend()
639
+ plt.xlabel("sample")
640
+ plt.ylabel("Pearson R")
641
+ plt.show()
642
+
643
+
644
+ # In[ ]:
645
+
646
+
647
+ vox_pairs = utils.zscore(vox[pairs_homog])
648
+ plt.figure(figsize=(5,4))
649
+ plt.title(f"{sub}_{session} same minus diff difference Pearson corr.")
650
+ plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference')
651
+ plt.axhline(0,c='k',ls='--')
652
+ plt.legend()
653
+ plt.xlabel("sample")
654
+ plt.ylabel("Pearson R")
655
+ plt.show()
656
+
657
+
658
+ # # Training MindEye
659
+
660
+ # In[ ]:
661
+
662
+
663
+ utils.seed_everything(seed)
664
+
665
+ if train_test_split == 'orig':
666
+ # train = all images except images that were repeated
667
+ # test = average of the same-image presentations
668
+ imageTrain = np.arange(len(images))
669
+ train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])
670
+ test_image_indices = pairs
671
+ print(len(train_image_indices), len(test_image_indices))
672
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
673
+ elif train_test_split == 'MST':
674
+ # non-MST images are the train split
675
+ # MST images are the test split
676
+ MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k])
677
+ non_MST_idx = [v for k,v in image_to_indices.items() if 'MST_pairs' not in k]
678
+ non_MST_idx = np.array([z for y in non_MST_idx for x in y for z in x]) # flatten the indices
679
+ train_image_indices = non_MST_idx
680
+ test_image_indices = MST_idx.flatten() # MST_idx contains the mapping for the different test sets; test_image_indices has all MST indices combined
681
+ print(len(train_image_indices), len(test_image_indices))
682
+ assert len(train_image_indices) + len(test_image_indices) == len(vox)
683
+ elif train_test_split == 'unique':
684
+ imageTest = np.arange(len(images))
685
+ train_image_indices = pairs.flatten()
686
+ test_image_indices = np.array([item for item in imageTest if item not in pairs.flatten()])
687
+ print(len(train_image_indices), len(test_image_indices))
688
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
689
+ else:
690
+ raise Exception("invalid train_test_split")
691
+
692
+ # TODO add assertion that verifies file names in train and test don't overlap, guards against repeats
693
+
694
+ for i in train_image_indices:
695
+ assert i not in test_image_indices
696
+
697
+
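# Illustrative sketch (not part of the original pipeline): one way to implement the TODO above,
# checking that no stimulus filename appears in both splits. It assumes a name array aligned
# with the rows of `vox` (e.g. vox_image_names when no trials were dropped); adjust to the
# actual variable before using.
import numpy as np

def assert_no_filename_overlap(names, train_idx, test_idx):
    names = np.asarray(names)
    train_names = set(names[np.asarray(train_idx).reshape(-1)])
    test_names = set(names[np.asarray(test_idx).reshape(-1)])
    overlap = train_names & test_names
    assert not overlap, f"{len(overlap)} filename(s) appear in both splits, e.g. {sorted(overlap)[:5]}"

# assert_no_filename_overlap(vox_image_names, train_image_indices, test_image_indices)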
698
+ # In[ ]:
699
+
700
+
701
+ ses_split = vox[train_image_indices].shape[0] // 2
702
+
703
+ train_mean_s1 = np.mean(vox[train_image_indices][:ses_split], axis=0)
704
+ train_std_s1 = np.std(vox[train_image_indices][:ses_split], axis=0)
705
+ train_mean_s2 = np.mean(vox[train_image_indices][ses_split:], axis=0)
706
+ train_std_s2 = np.std(vox[train_image_indices][ses_split:], axis=0)
707
+
708
+
709
+ vox[:ses_split] = utils.zscore(vox[:ses_split],train_mean=train_mean_s1,train_std=train_std_s1)
710
+ vox[ses_split:] = utils.zscore(vox[ses_split:],train_mean=train_mean_s2,train_std=train_std_s2)
711
+
712
+ print("voxels have been zscored")
713
+ print("ses-01:", vox[:ses_split,0].mean(), vox[:ses_split,0].std())
714
+ print("ses-02:", vox[ses_split:,0].mean(), vox[ses_split:,0].std())
715
+ print("vox", vox.shape)
716
+
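# Illustrative sketch (not part of the original pipeline): utils.zscore is assumed to
# standardize each voxel with the supplied training statistics, i.e. (x - train_mean) / train_std.
# The stand-in below makes that assumption explicit; it is not the actual implementation.
def zscore_sketch(x, train_mean, train_std, eps=1e-8):
    return (x - train_mean) / (train_std + eps)

# e.g. zscore_sketch(vox[:ses_split], train_mean_s1, train_std_s1) would mirror the call above.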
717
+
718
+ # In[ ]:
719
+
720
+
721
+ # save the mean and std from ses-01 and 02
722
+ train_test_mean_s1 = np.mean(vox[:ses_split], axis=0)
723
+ train_test_std_s1 = np.std(vox[:ses_split], axis=0)
724
+ train_test_mean_s2 = np.mean(vox[ses_split:], axis=0)
725
+ train_test_std_s2 = np.std(vox[ses_split:], axis=0)
726
+ print(train_test_mean_s1.shape)
727
+ assert np.all(train_test_mean_s1.shape == train_test_std_s1.shape)
728
+ assert np.all(train_test_mean_s1.shape == train_test_mean_s2.shape)
729
+ assert np.all(train_test_mean_s1.shape == train_test_std_s2.shape)
730
+
731
+
732
+ # In[ ]:
733
+
734
+
735
+ # for idx in deleted_indices:
736
+ # # check image names to be deleted match
737
+ # original_name = vox_image_dict[idx]
738
+ # matching_indices = [i for i in deleted_indices if vox_image_dict[i] == original_name]
739
+ # assert all(vox_image_dict[i] == original_name for i in matching_indices), \
740
+ # f"Mismatch in image names for deleted indices {matching_indices}"
741
+
742
+ # # check image data to be deleted match
743
+ # base_image = images[matching_indices[0]] # Reference image
744
+ # for i in matching_indices[1:]:
745
+ # assert np.array_equal(base_image, images[i]), \
746
+ # f"Mismatch in image data for {vox_image_dict[i]} at index {i}"
747
+
748
+ # images = images[kept_indices]
749
+
750
+
751
+ # In[ ]:
752
+
753
+
754
+ images = torch.Tensor(images)
755
+ vox = torch.Tensor(vox)
756
+ assert len(images) == len(vox)
757
+
758
+
759
+ # In[ ]:
760
+
761
+
762
+ ### Multi-GPU config ###
763
+ from accelerate import Accelerator, DeepSpeedPlugin
764
+
765
+ local_rank = os.getenv('RANK')
766
+ if local_rank is None:
767
+ local_rank = 0
768
+ else:
769
+ local_rank = int(local_rank)
770
+ print("LOCAL RANK ", local_rank)
771
+
772
+ data_type = torch.float32 # change depending on your mixed_precision
773
+
774
+ accelerator = Accelerator(split_batches=False)
775
+ batch_size = 8
776
+
777
+
778
+ # In[ ]:
779
+
780
+
781
+ print("PID of this process =",os.getpid())
782
+ device = accelerator.device
783
+ print("device:",device)
784
+ world_size = accelerator.state.num_processes
785
+ distributed = accelerator.state.distributed_type != 'NO'
786
+ num_devices = torch.cuda.device_count()
787
+ global_batch_size = batch_size * num_devices
788
+ print("global_batch_size", global_batch_size)
789
+ if num_devices==0 or not distributed: num_devices = 1
790
+ num_workers = num_devices
791
+ print(accelerator.state)
792
+
793
+ # set data_type to match your mixed precision (automatically set based on deepspeed config)
794
+ if accelerator.mixed_precision == "bf16":
795
+ data_type = torch.bfloat16
796
+ elif accelerator.mixed_precision == "fp16":
797
+ data_type = torch.float16
798
+ else:
799
+ data_type = torch.float32
800
+
801
+ print("distributed =",distributed, "num_devices =", num_devices, "local rank =", local_rank, "world size =", world_size, "data_type =", data_type)
802
+ print = accelerator.print # only print if local_rank=0
803
+
804
+
805
+ # ## Configurations
806
+
807
+ # In[ ]:
808
+
809
+
810
+ # if running this interactively, can specify jupyter_args here for argparser to use
811
+ if utils.is_interactive():
812
+ model_name = 'testing_MST' # 'sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0'
813
+ print("model_name:", model_name)
814
+
815
+ # global_batch_size and batch_size should already be defined in the above cells
816
+ # other variables can be specified in the following string:
817
+ # jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name={model_name}"
818
+
819
+ jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \
820
+ --model_name={model_name} \
821
+ --no-multi_subject --subj=1 --batch_size={batch_size} \
822
+ --hidden_dim=1024 --clip_scale=1. \
823
+ --no-blurry_recon --blur_scale=.5 \
824
+ --no-use_prior --prior_scale=30 \
825
+ --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \
826
+ --ckpt_interval=999 --no-ckpt_saving --new_test \
827
+ --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep"
828
+ print(jupyter_args)
829
+ jupyter_args = jupyter_args.split()
830
+
831
+
832
+ # In[ ]:
833
+
834
+
835
+ parser = argparse.ArgumentParser(description="Model Training Configuration")
836
+ parser.add_argument(
837
+ "--model_name", type=str, default="testing",
838
+ help="name of model, used for ckpt saving and wandb logging (if enabled)",
839
+ )
840
+ parser.add_argument(
841
+ "--data_path", type=str, default="/weka/proj-fmri/shared/natural-scenes-dataset",
842
+ help="Path to where NSD data is stored / where to download it to",
843
+ )
844
+ parser.add_argument(
845
+ "--subj",type=int, default=1, choices=[1,2,3,4,5,6,7,8],
846
+ help="Validate on which subject?",
847
+ )
848
+ parser.add_argument(
849
+ "--multisubject_ckpt", type=str, default=None,
850
+ help="Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.",
851
+ )
852
+ parser.add_argument(
853
+ "--num_sessions", type=int, default=0,
854
+ help="Number of training sessions to include (if multi_subject, this variable doesnt matter)",
855
+ )
856
+ parser.add_argument(
857
+ "--use_prior",action=argparse.BooleanOptionalAction,default=False,
858
+ help="whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)",
859
+ )
860
+ parser.add_argument(
861
+ "--batch_size", type=int, default=32,
862
+ help="Batch size can be increased by 10x if only training v2c and not diffusion diffuser",
863
+ )
864
+ parser.add_argument(
865
+ "--wandb_log",action=argparse.BooleanOptionalAction,default=False,
866
+ help="whether to log to wandb",
867
+ )
868
+ parser.add_argument(
869
+ "--resume_from_ckpt",action=argparse.BooleanOptionalAction,default=False,
870
+ help="if not using wandb and want to resume from a ckpt",
871
+ )
872
+ parser.add_argument(
873
+ "--wandb_project",type=str,default="stability",
874
+ help="wandb project name",
875
+ )
876
+ parser.add_argument(
877
+ "--mixup_pct",type=float,default=.33,
878
+ help="proportion of way through training when to switch from BiMixCo to SoftCLIP",
879
+ )
880
+ parser.add_argument(
881
+ "--low_mem",action=argparse.BooleanOptionalAction,default=False,
882
+ help="whether to preload images to cpu to speed things up but consume more memory",
883
+ )
884
+ parser.add_argument(
885
+ "--blurry_recon",action=argparse.BooleanOptionalAction,default=True,
886
+ help="whether to output blurry reconstructions",
887
+ )
888
+ parser.add_argument(
889
+ "--blur_scale",type=float,default=.5,
890
+ help="multiply loss from blurry recons by this number",
891
+ )
892
+ parser.add_argument(
893
+ "--clip_scale",type=float,default=1.,
894
+ help="multiply contrastive loss by this number",
895
+ )
896
+ parser.add_argument(
897
+ "--prior_scale",type=float,default=30,
898
+ help="multiply diffusion prior loss by this",
899
+ )
900
+ parser.add_argument(
901
+ "--use_image_aug",action=argparse.BooleanOptionalAction,default=True,
902
+ help="whether to use image augmentation",
903
+ )
904
+ parser.add_argument(
905
+ "--num_epochs",type=int,default=120,
906
+ help="number of epochs of training",
907
+ )
908
+ parser.add_argument(
909
+ "--multi_subject",action=argparse.BooleanOptionalAction,default=False,
910
+ )
911
+ parser.add_argument(
912
+ "--new_test",action=argparse.BooleanOptionalAction,default=True,
913
+ )
914
+ parser.add_argument(
915
+ "--n_blocks",type=int,default=2,
916
+ )
917
+ parser.add_argument(
918
+ "--hidden_dim",type=int,default=1024,
919
+ )
920
+ parser.add_argument(
921
+ "--seq_past",type=int,default=0,
922
+ )
923
+ parser.add_argument(
924
+ "--seq_future",type=int,default=0,
925
+ )
926
+ parser.add_argument(
927
+ "--lr_scheduler_type",type=str,default='cycle',choices=['cycle','linear'],
928
+ )
929
+ parser.add_argument(
930
+ "--ckpt_saving",action=argparse.BooleanOptionalAction,default=True,
931
+ )
932
+ parser.add_argument(
933
+ "--ckpt_interval",type=int,default=5,
934
+ help="save backup ckpt and reconstruct every x epochs",
935
+ )
936
+ parser.add_argument(
937
+ "--seed",type=int,default=42,
938
+ )
939
+ parser.add_argument(
940
+ "--max_lr",type=float,default=3e-4,
941
+ )
942
+
943
+ if utils.is_interactive():
944
+ args = parser.parse_args(jupyter_args)
945
+ else:
946
+ args = parser.parse_args()
947
+
948
+ # create global variables without the args prefix
949
+ for attribute_name in vars(args).keys():
950
+ globals()[attribute_name] = getattr(args, attribute_name)
951
+
952
+ outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')
953
+ if not os.path.exists(outdir) and ckpt_saving:
954
+ os.makedirs(outdir,exist_ok=True)
955
+
956
+ if use_image_aug or blurry_recon:
957
+ import kornia
958
+ import kornia.augmentation as K
959
+ from kornia.augmentation.container import AugmentationSequential
960
+ if use_image_aug:
961
+ img_augment = AugmentationSequential(
962
+ kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),
963
+ same_on_batch=False,
964
+ data_keys=["input"],
965
+ )
966
+ # Define the blurring augmentations
967
+ blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)
968
+
969
+ if multi_subject:
970
+ subj_list = np.arange(1,9)
971
+ subj_list = subj_list[subj_list != subj]
972
+ else:
973
+ subj_list = [subj]
974
+
975
+ print("subj_list", subj_list, "num_sessions", num_sessions)
976
+
977
+
978
+ # ## Prep data, models, and dataloaders
979
+
980
+ # In[ ]:
981
+
982
+
983
+ if ckpt_saving:
984
+ # save MST_ID for 2-alternative forced-choice retrieval evaluation
985
+ if 'MST' in model_name:
986
+ eval_dir = os.environ["eval_dir"]
987
+ print('saving MST info in', eval_dir)
988
+ # Saving ##
989
+ if not os.path.exists(eval_dir):
990
+ os.mkdir(eval_dir)
991
+
992
+ np.save(f"{eval_dir}/MST_ID.npy", MST_ID)
993
+ np.save(f"{eval_dir}/MST_pairmate_indices.npy", MST_pairmate_indices)
994
+
995
+ if remove_random_n:
996
+ np.save(f"{eval_dir}/imgs_to_remove.npy", imgs_to_remove)
997
+
998
+ np.save(f"{eval_dir}/train_image_indices.npy", train_image_indices)
999
+ np.save(f"{eval_dir}/test_image_indices.npy", test_image_indices)
1000
+ np.save(f"{eval_dir}/images.npy", images)
1001
+ np.save(f"{eval_dir}/vox.npy", vox)
1002
+
1003
+ np.save(f'{eval_dir}/train_test_mean_s1.npy', train_test_mean_s1)
1004
+ np.save(f'{eval_dir}/train_test_std_s1.npy', train_test_std_s1)
1005
+ np.save(f'{eval_dir}/train_test_mean_s2.npy', train_test_mean_s2)
1006
+ np.save(f'{eval_dir}/train_test_std_s2.npy', train_test_std_s2)
1007
+
1008
+
1009
+ # ### Creating wds dataloader, preload betas and all 73k possible images
1010
+
1011
+ # In[ ]:
1012
+
1013
+
1014
+ def my_split_by_node(urls): return urls
1015
+ num_voxels_list = []
1016
+
1017
+ if multi_subject:
1018
+ nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])
1019
+ num_samples_per_epoch = (750*40) // num_devices
1020
+ else:
1021
+ # num_samples_per_epoch = (750*num_sessions) // num_devices
1022
+ num_samples_per_epoch = len(train_image_indices)
1023
+
1024
+ print("dividing batch size by subj_list, which will then be concatenated across subj during training...")
1025
+ batch_size = batch_size // len(subj_list)
1026
+
1027
+ num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))
1028
+
1029
+ print("batch_size =", batch_size, "num_iterations_per_epoch =",num_iterations_per_epoch, "num_samples_per_epoch =",num_samples_per_epoch)
1030
+
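# Worked example of the arithmetic above (single-subject case): batch_size stays 8 after the
# division by len(subj_list)=1, num_samples_per_epoch equals len(train_image_indices), and
# e.g. 504 training trials give 504 // 8 = 63 iterations per epoch.
if not multi_subject:
    assert num_iterations_per_epoch == len(train_image_indices) // (batch_size * len(subj_list))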
1031
+
1032
+ # In[ ]:
1033
+
1034
+
1035
+ train_data = {}
1036
+ train_dl = {}
1037
+
1038
+ train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))
1039
+ test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))
1040
+
1041
+
1042
+ # In[ ]:
1043
+
1044
+
1045
+ num_voxels = {}
1046
+ voxels = {}
1047
+ for s in subj_list:
1048
+ print(f"Training with {num_sessions} sessions")
1049
+ train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)
1050
+
1051
+ num_voxels_list.append(vox[0].shape[-1])
1052
+ num_voxels[f'subj0{s}'] = vox[0].shape[-1]
1053
+ voxels[f'subj0{s}'] = vox
1054
+ print(f"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}")
1055
+
1056
+ print("Loaded all subj train dls and vox!\n")
1057
+
1058
+ # Validate only on one subject
1059
+ if multi_subject:
1060
+ subj = subj_list[0] # can't validate on the actual held-out subject, so pick the first in subj_list
1061
+ test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True)
1062
+
1063
+ print(f"Loaded test dl for subj{subj}!\n")
1064
+
1065
+
1066
+ # ## Load models
1067
+
1068
+ # ### CLIP image embeddings model
1069
+
1070
+ # In[ ]:
1071
+
1072
+
1073
+ ## USING OpenCLIP ViT-bigG ###
1074
+ sys.path.append('generative_models/')
1075
+ import sgm
1076
+ from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder
1077
+ # from generative_models.sgm.models.diffusion import DiffusionEngine
1078
+ # from omegaconf import OmegaConf
1079
+
1080
+ try:
1081
+ print(clip_img_embedder)
1082
+ except:
1083
+ clip_img_embedder = FrozenOpenCLIPImageEmbedder(
1084
+ arch="ViT-bigG-14",
1085
+ version="laion2b_s39b_b160k",
1086
+ output_tokens=True,
1087
+ only_tokens=True,
1088
+ )
1089
+ clip_img_embedder.to(device)
1090
+ clip_seq_dim = 256
1091
+ clip_emb_dim = 1664
1092
+
1093
+ # ## USING OPEN AI CLIP ViT-L ###
1094
+ # import clip
1095
+ # try:
1096
+ # print(clip_model)
1097
+ # except:
1098
+ # clip_model, preprocess = clip.load("ViT-L/14", device=device)
1099
+ # preprocess = transforms.Compose([
1100
+ # transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),
1101
+ # transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
1102
+ # std=[0.26862954, 0.26130258, 0.27577711]),
1103
+ # ])
1104
+ # def clip_img_embedder(image):
1105
+ # preproc_img = preprocess(image)
1106
+ # return clip_model.encode_image(preproc_img)
1107
+ # clip_seq_dim = 1
1108
+ # clip_emb_dim = 768
1109
+
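# Shape sanity check for the frozen encoder above (sketch): with output_tokens/only_tokens,
# ViT-bigG/14 is expected to return token embeddings of shape (batch, clip_seq_dim, clip_emb_dim)
# = (batch, 256, 1664). Probes the first stimulus image already loaded into `images`.
with torch.no_grad():
    _emb = clip_img_embedder(images[:1].float().to(device))
print("clip_img_embedder output shape:", tuple(_emb.shape))
assert _emb.shape[1:] == (clip_seq_dim, clip_emb_dim)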
1110
+
1111
+ # ### MindEye modules
1112
+
1113
+ # In[ ]:
1114
+
1115
+
1116
+ model = utils.prepare_model_and_training(
1117
+ num_voxels_list=num_voxels_list,
1118
+ n_blocks=n_blocks,
1119
+ hidden_dim=hidden_dim,
1120
+ clip_emb_dim=clip_emb_dim,
1121
+ clip_seq_dim=clip_seq_dim,
1122
+ use_prior=use_prior,
1123
+ clip_scale=clip_scale
1124
+ )
1125
+
1126
+
1127
+ # In[ ]:
1128
+
1129
+
1130
+ # test on subject 1 with fake data
1131
+ b = torch.randn((2,1,num_voxels_list[0]))
1132
+ print(b.shape, model.ridge(b,0).shape)
1133
+
1134
+
1135
+ # In[ ]:
1136
+
1137
+
1138
+ # test that the model works on some fake data
1139
+ b = torch.randn((2,1,hidden_dim))
1140
+ print("b.shape",b.shape)
1141
+
1142
+ backbone_, clip_, blur_ = model.backbone(b)
1143
+ print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)
1144
+
1145
+
1146
+ # ### Adding diffusion prior + unCLIP if use_prior=True
1147
+
1148
+ # In[ ]:
1149
+
1150
+
1151
+ if use_prior:
1152
+ from models import *
1153
+
1154
+ # setup diffusion prior network
1155
+ out_dim = clip_emb_dim
1156
+ depth = 6
1157
+ dim_head = 52
1158
+ heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim
1159
+ timesteps = 100
1160
+
1161
+ prior_network = VersatileDiffusionPriorNetwork(
1162
+ dim=out_dim,
1163
+ depth=depth,
1164
+ dim_head=dim_head,
1165
+ heads=heads,
1166
+ causal=False,
1167
+ num_tokens = clip_seq_dim,
1168
+ learned_query_mode="pos_emb"
1169
+ )
1170
+
1171
+ model.diffusion_prior = BrainDiffusionPrior(
1172
+ net=prior_network,
1173
+ image_embed_dim=out_dim,
1174
+ condition_on_text_encodings=False,
1175
+ timesteps=timesteps,
1176
+ cond_drop_prob=0.2,
1177
+ image_embed_scale=None,
1178
+ )
1179
+
1180
+ utils.count_params(model.diffusion_prior)
1181
+ utils.count_params(model)
1182
+
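# Worked check of the attention bookkeeping above: with clip_emb_dim = 1664 and dim_head = 52,
# heads = 1664 // 52 = 32 and 32 * 52 = 1664, so the prior's attention width matches the
# CLIP embedding width exactly.
if use_prior:
    assert heads * dim_head == clip_emb_dim, (heads, dim_head, clip_emb_dim)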
1183
+
1184
+ # ### Setup optimizer / lr / ckpt saving
1185
+
1186
+ # In[ ]:
1187
+
1188
+
1189
+ no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
1190
+
1191
+ opt_grouped_parameters = [
1192
+ {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},
1193
+ {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1194
+ {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
1195
+ ]
1196
+ # model.backbone.requires_grad_(False)
1197
+
1198
+ if use_prior:
1199
+ opt_grouped_parameters.extend([
1200
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1201
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
1202
+ ])
1203
+
1204
+ optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)
1205
+
1206
+ if lr_scheduler_type == 'linear':
1207
+ lr_scheduler = torch.optim.lr_scheduler.LinearLR(
1208
+ optimizer,
1209
+ total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),
1210
+ last_epoch=-1
1211
+ )
1212
+ elif lr_scheduler_type == 'cycle':
1213
+ if num_iterations_per_epoch==0:
1214
+ num_iterations_per_epoch=1
1215
+ total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))
1216
+ print("total_steps", total_steps)
1217
+ lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
1218
+ optimizer,
1219
+ max_lr=max_lr,
1220
+ total_steps=total_steps,
1221
+ final_div_factor=1000,
1222
+ last_epoch=-1, pct_start=2/num_epochs
1223
+ )
1224
+
1225
+ def save_ckpt(tag):
1226
+ ckpt_path = outdir+f'/{tag}.pth'
1227
+ if accelerator.is_main_process:
1228
+ unwrapped_model = accelerator.unwrap_model(model)
1229
+ torch.save({
1230
+ 'epoch': epoch,
1231
+ 'model_state_dict': unwrapped_model.state_dict(),
1232
+ 'optimizer_state_dict': optimizer.state_dict(),
1233
+ 'lr_scheduler': lr_scheduler.state_dict(),
1234
+ 'train_losses': losses,
1235
+ 'test_losses': test_losses,
1236
+ 'lrs': lrs,
1237
+ }, ckpt_path)
1238
+ print(f"\n---saved {outdir}/{tag} ckpt!---\n")
1239
+
1240
+ def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False):
1241
+ print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
1242
+ checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu') # load the checkpoint matching the requested tag
1243
+ state_dict = checkpoint['model_state_dict']
1244
+ if multisubj_loading: # remove incompatible ridge layer that will otherwise error
1245
+ state_dict.pop('ridge.linears.0.weight',None)
1246
+ model.load_state_dict(state_dict, strict=strict)
1247
+ if load_epoch:
1248
+ globals()["epoch"] = checkpoint['epoch']
1249
+ print("Epoch",epoch)
1250
+ if load_optimizer:
1251
+ optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
1252
+ if load_lr:
1253
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
1254
+ del checkpoint
1255
+
1256
+ print("\nDone with model preparations!")
1257
+ num_params = utils.count_params(model)
1258
+
1259
+
1260
+ # # Wandb
1261
+
1262
+ # In[ ]:
1263
+
1264
+
1265
+ if local_rank==0 and wandb_log: # only use main process for wandb logging
1266
+ import wandb
1267
+ import time
1268
+
1269
+ wandb_project = 'rtmindeye'
1270
+ print(f"wandb {wandb_project} run {model_name}")
1271
+
1272
+ # Need to configure wandb beforehand in terminal with "wandb init"!
1273
+ wandb_config = {
1274
+ "model_name": model_name,
1275
+ "global_batch_size": global_batch_size,
1276
+ "batch_size": batch_size,
1277
+ "num_epochs": num_epochs,
1278
+ "num_sessions": num_sessions,
1279
+ "num_params": num_params,
1280
+ "clip_scale": clip_scale,
1281
+ "prior_scale": prior_scale,
1282
+ "blur_scale": blur_scale,
1283
+ "use_image_aug": use_image_aug,
1284
+ "max_lr": max_lr,
1285
+ "mixup_pct": mixup_pct,
1286
+ "num_samples_per_epoch": num_samples_per_epoch,
1287
+ "ckpt_interval": ckpt_interval,
1288
+ "ckpt_saving": ckpt_saving,
1289
+ "seed": seed, # SLURM array task ID
1290
+ "distributed": distributed,
1291
+ "num_devices": num_devices,
1292
+ "world_size": world_size,
1293
+ }
1294
+ print("wandb_config:\n", wandb_config)
1295
+ print("wandb_id:", model_name)
1296
+
1297
+ # Initialize wandb
1298
+ wandb.init(
1299
+ id=model_name,
1300
+ project=wandb_project,
1301
+ name=model_name,
1302
+ config=wandb_config,
1303
+ resume="allow",
1304
+ save_code=True,
1305
+ )
1306
+
1307
+ # Get SLURM job & array ID
1308
+ slurm_job_id = utils.get_slurm_job()
1309
+ slurm_array_id = seed # seed corresponds to SLURM_ARRAY_TASK_ID
1310
+
1311
+ # Define SLURM log paths
1312
+ log_dir = "slurms"
1313
+ log_files = [
1314
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.out",
1315
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.err",
1316
+ ]
1317
+
1318
+ # Ensure logs exist before logging them
1319
+ for log_file in log_files:
1320
+ wait_time = 0
1321
+ while not os.path.exists(log_file) and wait_time < 60: # Wait max 60s
1322
+ time.sleep(5)
1323
+ wait_time += 5
1324
+
1325
+ # Log SLURM logs as artifacts
1326
+ artifact = wandb.Artifact(f"slurm_logs_{slurm_job_id}_{slurm_array_id}", type="logs")
1327
+ for log_file in log_files:
1328
+ if os.path.exists(log_file):
1329
+ artifact.add_file(log_file)
1330
+
1331
+ wandb.log_artifact(artifact)
1332
+ else:
1333
+ wandb_log = False
1334
+
1335
+
1336
+ # # Train the model
1337
+
1338
+ # In[ ]:
1339
+
1340
+
1341
+ epoch = 0
1342
+ losses, test_losses, lrs = [], [], []
1343
+ best_test_loss = 1e9
1344
+ torch.cuda.empty_cache()
1345
+
1346
+
1347
+ # In[ ]:
1348
+
1349
+
1350
+ # load multisubject stage1 ckpt if set
1351
+ if multisubject_ckpt is not None and not resume_from_ckpt:
1352
+ load_ckpt("last",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)
1353
+
1354
+
1355
+ # In[ ]:
1356
+
1357
+
1358
+ # checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')
1359
+ # state_dict = checkpoint['model_state_dict']
1360
+ # model.load_state_dict(state_dict, strict=False)
1361
+
1362
+
1363
+ # In[ ]:
1364
+
1365
+
1366
+ # train_dls = [train_dl[f'subj0{s}'] for s in subj_list]
1367
+
1368
+ model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)
1369
+ # leaving out test_dl since we will only have local_rank 0 device do evals
1370
+
1371
+
1372
+ # In[ ]:
1373
+
1374
+
1375
+ print(f"{model_name} starting with epoch {epoch} / {num_epochs}")
1376
+ progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0))
1377
+ test_image, test_voxel = None, None
1378
+ mse = nn.MSELoss()
1379
+ l1 = nn.L1Loss()
1380
+ soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs))
1381
+ skip_train = epoch >= (num_epochs-1) # skip training if resuming from a fully trained model
1382
+
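# Worked example of the BiMixCo -> SoftCLIP schedule used below: the switch happens at epoch
# int(mixup_pct * num_epochs); with the interactive defaults (mixup_pct=0.33, num_epochs=30)
# that is int(9.9) = 9, so epochs 0-8 use utils.mixco_nce and epochs 9+ use utils.soft_clip_loss.
print(f"contrastive loss switches from BiMixCo to SoftCLIP at epoch {int(mixup_pct * num_epochs)}")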
1383
+ for epoch in progress_bar:
1384
+ model.train()
1385
+
1386
+ fwd_percent_correct = 0.
1387
+ bwd_percent_correct = 0.
1388
+ test_fwd_percent_correct = 0.
1389
+ test_bwd_percent_correct = 0.
1390
+
1391
+ recon_cossim = 0.
1392
+ test_recon_cossim = 0.
1393
+ recon_mse = 0.
1394
+ test_recon_mse = 0.
1395
+
1396
+ loss_clip_total = 0.
1397
+ loss_blurry_total = 0.
1398
+ loss_blurry_cont_total = 0.
1399
+ test_loss_clip_total = 0.
1400
+
1401
+ loss_prior_total = 0.
1402
+ test_loss_prior_total = 0.
1403
+
1404
+ blurry_pixcorr = 0.
1405
+ test_blurry_pixcorr = 0.
1406
+
1407
+ # each epoch draws num_iterations_per_epoch batches from train_dl below
1408
+ for train_i, behav in enumerate(train_dl):
1409
+ with torch.cuda.amp.autocast(dtype=data_type):
1410
+ optimizer.zero_grad()
1411
+ loss = 0.
1412
+
1413
+ behav = behav[0]
1414
+
1415
+ image = images[behav.long().cpu()].to(device)
1416
+ voxel = vox[behav.long().cpu()]
1417
+ # voxel = (voxel - train_mean) / train_std
1418
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1419
+
1420
+ if use_image_aug:
1421
+ image = img_augment(image)
1422
+
1423
+ clip_target = clip_img_embedder(image)
1424
+ assert not torch.any(torch.isnan(clip_target))
1425
+
1426
+ if epoch < int(mixup_pct * num_epochs):
1427
+ voxel, perm, betas, select = utils.mixco(voxel)
1428
+
1429
+ voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)]
1430
+ # voxel_ridge = torch.cat(voxel_ridge_list, dim=0)
1431
+
1432
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1433
+
1434
+ if clip_scale>0:
1435
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1436
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1437
+
1438
+ if use_prior:
1439
+ loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target)
1440
+ loss_prior_total += loss_prior.item()
1441
+ loss_prior *= prior_scale
1442
+ loss += loss_prior
1443
+
1444
+ recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item()
1445
+ recon_mse += mse(prior_out, clip_target).item()
1446
+
1447
+ if clip_scale>0:
1448
+ if epoch < int(mixup_pct * num_epochs):
1449
+ loss_clip = utils.mixco_nce(
1450
+ clip_voxels_norm,
1451
+ clip_target_norm,
1452
+ temp=.006,
1453
+ perm=perm, betas=betas, select=select)
1454
+ else:
1455
+ epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)]
1456
+ loss_clip = utils.soft_clip_loss(
1457
+ clip_voxels_norm,
1458
+ clip_target_norm,
1459
+ temp=epoch_temp)
1460
+
1461
+ loss_clip_total += loss_clip.item()
1462
+ loss_clip *= clip_scale
1463
+ loss += loss_clip
1464
+
1465
+ if blurry_recon:
1466
+ image_enc_pred, transformer_feats = blurry_image_enc_
1467
+
1468
+ image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215
1469
+ loss_blurry = l1(image_enc_pred, image_enc)
1470
+ loss_blurry_total += loss_blurry.item()
1471
+
1472
+ if epoch < int(mixup_pct * num_epochs):
1473
+ image_enc_shuf = image_enc[perm]
1474
+ betas_shape = [-1] + [1]*(len(image_enc.shape)-1)
1475
+ image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \
1476
+ image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)
1477
+
1478
+ image_norm = (image - mean)/std
1479
+ image_aug = (blur_augs(image) - mean)/std
1480
+ _, cnx_embeds = cnx(image_norm)
1481
+ _, cnx_aug_embeds = cnx(image_aug)
1482
+
1483
+ cont_loss = utils.soft_cont_loss(
1484
+ nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),
1485
+ nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1486
+ nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1487
+ temp=0.2)
1488
+ loss_blurry_cont_total += cont_loss.item()
1489
+
1490
+ loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215
1491
+
1492
+ if clip_scale>0:
1493
+ # forward and backward top 1 accuracy
1494
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1495
+ fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1496
+ bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1497
+
1498
+ if blurry_recon:
1499
+ with torch.no_grad():
1500
+ # only doing pixcorr eval on a subset of the samples per batch because it's costly & slow to compute autoenc.decode()
1501
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)
1502
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1)
1503
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1504
+ blurry_pixcorr += pixcorr.item()
1505
+
1506
+ utils.check_loss(loss)
1507
+ accelerator.backward(loss)
1508
+ optimizer.step()
1509
+
1510
+ losses.append(loss.item())
1511
+ lrs.append(optimizer.param_groups[0]['lr'])
1512
+
1513
+ if lr_scheduler_type is not None:
1514
+ lr_scheduler.step()
1515
+
1516
+ if train_i >= num_iterations_per_epoch-1:
1517
+ break
1518
+
1519
+ model.eval()
1520
+ logs = {}
1521
+
1522
+ if local_rank == 0:
1523
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1524
+ for i in range(2):
1525
+ for j in range(2):
1526
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1527
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1528
+ subset_dl = torch.utils.data.DataLoader(
1529
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1530
+ drop_last=True, pin_memory=True
1531
+ )
1532
+
1533
+ # Reset metrics for this subset
1534
+ test_losses = []
1535
+ test_loss_clip_total = 0
1536
+ test_loss_prior_total = 0
1537
+ test_blurry_pixcorr = 0
1538
+ test_fwd_percent_correct = 0
1539
+ test_bwd_percent_correct = 0
1540
+ test_recon_cossim = 0
1541
+ test_recon_mse = 0
1542
+
1543
+ for test_i, behav in enumerate(subset_dl):
1544
+ behav = behav[0]
1545
+ loss = 0.
1546
+
1547
+ if behav.ndim > 1:
1548
+ image = images[behav[:, 0].long().cpu()].to(device)
1549
+ voxel = vox[behav.long().cpu()].mean(1)
1550
+ else:
1551
+ image = images[behav.long().cpu()].to(device)
1552
+ voxel = vox[behav.long().cpu()]
1553
+
1554
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1555
+
1556
+ clip_img_embedder = clip_img_embedder.to(device)
1557
+ clip_target = clip_img_embedder(image.float())
1558
+
1559
+ voxel_ridge = model.ridge(voxel, 0)
1560
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1561
+
1562
+ if clip_scale > 0:
1563
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1564
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1565
+
1566
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image) // 5, replace=False)
1567
+
1568
+ if use_prior:
1569
+ loss_prior, contaminated_prior_out = model.diffusion_prior(
1570
+ text_embed=backbone[random_samps], image_embed=clip_target[random_samps])
1571
+ test_loss_prior_total += loss_prior.item()
1572
+ loss_prior *= prior_scale
1573
+ loss += loss_prior
1574
+
1575
+ if clip_scale > 0:
1576
+ loss_clip = utils.soft_clip_loss(
1577
+ clip_voxels_norm,
1578
+ clip_target_norm,
1579
+ temp=0.006
1580
+ )
1581
+ test_loss_clip_total += loss_clip.item()
1582
+ loss_clip *= clip_scale
1583
+ loss += loss_clip
1584
+
1585
+ if blurry_recon:
1586
+ image_enc_pred, _ = blurry_image_enc_
1587
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps] / 0.18215).sample / 2 + 0.5).clamp(0, 1)
1588
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1589
+ test_blurry_pixcorr += pixcorr.item()
1590
+
1591
+ if clip_scale > 0:
1592
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1593
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1594
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1595
+
1596
+ utils.check_loss(loss)
1597
+ test_losses.append(loss.item())
1598
+
1599
+ logs.update({
1600
+ f"subset_{i}_{j}_test/loss": np.mean(test_losses),
1601
+ f"subset_{i}_{j}_test/loss_clip_total": test_loss_clip_total / (test_i + 1),
1602
+ f"subset_{i}_{j}_test/loss_prior": test_loss_prior_total / (test_i + 1),
1603
+ f"subset_{i}_{j}_test/blurry_pixcorr": test_blurry_pixcorr / (test_i + 1),
1604
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1605
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1606
+ })
1607
+ print(f"--- Subset ({i},{j}) ---")
1608
+ for k, v in logs.items():
1609
+ if f"subset_{i}_{j}" in k:
1610
+ print(f"{k}: {v:.4f}")
1611
+
1612
+ # After subset loop: add train (and global test, if you want) metrics
1613
+ logs.update({
1614
+ "train/loss": np.mean(losses[-(train_i+1):]),
1615
+ "train/lr": lrs[-1],
1616
+ "train/num_steps": len(losses),
1617
+ "train/fwd_pct_correct": fwd_percent_correct / (train_i + 1),
1618
+ "train/bwd_pct_correct": bwd_percent_correct / (train_i + 1),
1619
+ "train/loss_clip_total": loss_clip_total / (train_i + 1),
1620
+ "train/loss_blurry_total": loss_blurry_total / (train_i + 1),
1621
+ "train/loss_blurry_cont_total": loss_blurry_cont_total / (train_i + 1),
1622
+ "train/blurry_pixcorr": blurry_pixcorr / (train_i + 1),
1623
+ "train/recon_cossim": recon_cossim / (train_i + 1),
1624
+ "train/recon_mse": recon_mse / (train_i + 1),
1625
+ "train/loss_prior": loss_prior_total / (train_i + 1),
1626
+ })
1627
+
1628
+
1629
+ # if finished training, save jpg recons if they exist
1630
+ if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0):
1631
+ if blurry_recon:
1632
+ image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215
1633
+ # transform blurry recon latents to images and plot it
1634
+ fig, axes = plt.subplots(1, 8, figsize=(10, 4))
1635
+ jj=-1
1636
+ for j in [0,1,2,3]:
1637
+ jj+=1
1638
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1639
+ axes[jj].axis('off')
1640
+ jj+=1
1641
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1642
+ axes[jj].axis('off')
1643
+ plt.show()
1644
+
1645
+ progress_bar.set_postfix(**logs)
1646
+
1647
+ if wandb_log: wandb.log(logs)
1648
+
1649
+ # Save model checkpoint and reconstruct
1650
+ if (ckpt_saving) and (epoch % ckpt_interval == 0):
1651
+ save_ckpt(f'last')
1652
+
1653
+ # wait for other GPUs to catch up if needed
1654
+ accelerator.wait_for_everyone()
1655
+ torch.cuda.empty_cache()
1656
+
1657
+ print("\n===Finished!===\n")
1658
+ if ckpt_saving:
1659
+ save_ckpt(f'last')
1660
+
1661
+
1662
+ # In[ ]:
1663
+
1664
+
1665
+ len(test_data)
1666
+
1667
+
1668
+ # In[ ]:
1669
+
1670
+
1671
+ # # Track metrics here:
1672
+ # https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999
1673
+
1674
+
1675
+ # **To tell whether the model is working, check test_bwd/fwd_pct_correct and see whether it beats chance (1/batch_size)**
1676
+
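# Worked example of the chance level referenced above: fwd/bwd_pct_correct is top-1 retrieval
# within a test batch, so chance is 1/batch. The MST subset evaluation batches len(MST_idx)
# trials at a time; e.g. a 31-trial batch would put chance at about 1/31 ~ 3.2%.
print(f"retrieval chance for the MST subset batches ~ {1/len(MST_idx):.2%}")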
1677
+ # In[ ]:
1678
+
1679
+
1680
+ # MST_pairmate_names
1681
+
1682
+
1683
+ # In[ ]:
1684
+
1685
+
1686
+ x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')]
1687
+ assert len(image_idx) == len(x)
1688
+ pairs = []
1689
+ for i, p in enumerate(MST_pairmate_names):
1690
+ assert p[0] != p[1] # no duplicate images
1691
+ pairs.append([utils.find_all_indices(x,p[0]), utils.find_all_indices(x,p[1])])
1692
+
1693
+ pairs = np.array(pairs)
1694
+
1695
+
1696
+ # In[ ]:
1697
+
1698
+
1699
+ pairs.shape
1700
+
1701
+
1702
+ # In[ ]:
1703
+
1704
+
1705
+ model.eval()
1706
+ logs = {}
1707
+ if local_rank == 0:
1708
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1709
+ for i in range(2):
1710
+ for j in range(2):
1711
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1712
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1713
+ subset_dl = torch.utils.data.DataLoader(
1714
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1715
+ drop_last=True, pin_memory=True
1716
+ )
1717
+
1718
+ # Reset metrics for this subset
1719
+ test_fwd_percent_correct = 0
1720
+ test_bwd_percent_correct = 0
1721
+
1722
+ for test_i, behav in enumerate(subset_dl):
1723
+ behav = behav[0]
1724
+ loss = 0.
1725
+ image = images[behav.long().cpu()].to(device)
1726
+ voxel = vox[behav.long().cpu()]
1727
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1728
+ clip_img_embedder = clip_img_embedder.to(device)
1729
+ clip_target = clip_img_embedder(image.float())
1730
+
1731
+ voxel_ridge = model.ridge(voxel, 0)
1732
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1733
+
1734
+ clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1)
1735
+ clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1)
1736
+
1737
+ if clip_scale > 0:
1738
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1739
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1740
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1741
+ print(test_fwd_percent_correct)
1742
+ print(test_bwd_percent_correct)
1743
+ logs.update({
1744
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1745
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1746
+ })
1747
+
1748
+ print("--- Full Dataset Evaluation ---")
1749
+ for k, v in logs.items():
1750
+ print(f"{k}: {v:.4f}")
1751
+
1752
+
1753
+ # In[ ]:
1754
+
1755
+
1756
+ # if sub=="sub-002":
1757
+ # unique_images_pairs = [
1758
+ # (2,3),(4,5),(7,8),(15,16),
1759
+ # (483, 484), (485, 486), (487, 488), (491, 492), (495, 496), (499, 500), (501, 502),
1760
+ # (503, 504), (512, 513),
1761
+ # ]
1762
+ # elif sub != 'sub-001' and session != 'ses-05':
1763
+ # unique_images_pairs = [
1764
+ # (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),
1765
+ # (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),
1766
+ # (31,32),(33,34),(35,36),
1767
+ # (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),
1768
+ # (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),
1769
+ # (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),
1770
+ # (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),
1771
+ # (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),
1772
+ # (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),
1773
+ # (847, 848), (849, 850)
1774
+ # ]
1775
+ # else:
1776
+ # # unique_images = unique_images[unique_images!='blank.jpg'][:50]
1777
+ # unique_images_pairs = find_mst_pairs(x)
1778
+ # # unique_images[unique_images_pairs]
1779
+
1780
+
1781
+ # In[ ]:
1782
+
1783
+
1784
+ import pdb
1785
+
1786
+
1787
+ # In[ ]:
1788
+
1789
+
1790
+ def evaluate_mst_pairs(mst_pairs):
1791
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1792
+ failed_A = []
1793
+ failed_B = []
1794
+ failed_non_corr = []
1795
+
1796
+ # Get all unique image indices
1797
+ all_indices = np.unique(mst_pairs.flatten())
1798
+
1799
+ # Pre-load all images and betas to device
1800
+ all_images = images[image_idx[all_indices]].to(device)
1801
+ all_voxels = torch.Tensor(vox[image_idx[all_indices]]).unsqueeze(1).to(device)
1802
+
1803
+ # Get CLIP embeddings for all images
1804
+ all_clip_targets = clip_img_embedder(all_images.float())
1805
+ all_clip_targets_norm = nn.functional.normalize(all_clip_targets.flatten(1), dim=-1)
1806
+
1807
+ # Pass all betas through model to get MindEye embeddings
1808
+ all_voxel_ridge = model.ridge(all_voxels, 0)
1809
+ _, all_clip_voxels, _ = model.backbone(all_voxel_ridge)
1810
+ all_clip_voxels_norm = nn.functional.normalize(all_clip_voxels.flatten(1), dim=-1)
1811
+
1812
+ # Dict mapping idx (which indexes the "vox" and "images" tensors) to pos (their position in the flattened array "all_indices")
1813
+ idx_to_pos = {idx: pos for pos, idx in enumerate(all_indices)}
1814
+
1815
+ # Initialize scores
1816
+ corr_score = 0
1817
+ non_corr_score = 0
1818
+ corr_total = len(mst_pairs) * 2
1819
+ non_corr_total = len(mst_pairs) * (len(mst_pairs)-1) * 4 # each of the four similarity matrices (A-A, A-B, B-B, B-A) contributes n*(n-1) off-diagonal comparisons
1820
+
1821
+
1822
+ # Pre-load voxelwise beta-based embeddings from MindEye and CLIP image embeddings
1823
+ idxA = np.array([pair[0] for pair in mst_pairs])
1824
+ idxB = np.array([pair[1] for pair in mst_pairs])
1825
+
1826
+ posA = np.array([idx_to_pos[idx] for idx in idxA])
1827
+ posB = np.array([idx_to_pos[idx] for idx in idxB])
1828
+
1829
+ voxA_embeddings = all_clip_voxels_norm[posA]
1830
+ voxB_embeddings = all_clip_voxels_norm[posB]
1831
+ imgA_embeddings = all_clip_targets_norm[posA]
1832
+ imgB_embeddings = all_clip_targets_norm[posB]
1833
+
1834
+ simA_A = utils.batchwise_cosine_similarity(voxA_embeddings, imgA_embeddings)
1835
+ simA_B = utils.batchwise_cosine_similarity(voxA_embeddings, imgB_embeddings)
1836
+ simB_B = utils.batchwise_cosine_similarity(voxB_embeddings, imgB_embeddings)
1837
+ simB_A = utils.batchwise_cosine_similarity(voxB_embeddings, imgA_embeddings)
1838
+
1839
+
1840
+ # corresponding 2-AFC
1841
+ # is the voxel embedding for image 1 pairmate A more similar to the CLIP embedding for image 1 pairmate A or the CLIP embedding for image 1 pairmate B?
1842
+ correct_A = torch.diag(simA_A) > torch.diag(simA_B)
1843
+ # is the voxel embedding for image 1 pairmate B more similar to the CLIP embedding for image 1 pairmate B or the CLIP embedding for image 1 pairmate A?
1844
+ correct_B = torch.diag(simB_B) > torch.diag(simB_A)
1845
+
1846
+ corr_score += correct_A.sum().item()
1847
+ corr_score += correct_B.sum().item()
1848
+
1849
+ # Store indices where AFC fails
1850
+ failed_A = [i for i, correct in enumerate(correct_A.cpu()) if not correct]
1851
+ failed_B = [i for i, correct in enumerate(correct_B.cpu()) if not correct]
1852
+
1853
+ # non-corresponding 2-AFC
1854
+ N = len(mst_pairs)
1855
+ # Create a mask that is True for all off-diagonal elements
1856
+ row_idx = torch.arange(N).unsqueeze(1) # (N, 1)
1857
+ col_idx = torch.arange(N).unsqueeze(0) # (1, N)
1858
+ off_diag_mask = row_idx != col_idx # shape (N, N)
1859
+
1860
+ diagA_A = simA_A.diag().unsqueeze(1).expand(-1, N) # Get diagonal values and expand to (N, N) by duplicating the diagonal element along the rows (since each row is the cosine similarity between a single voxel embedding and all CLIP embeddings)
1861
+ diagB_B = simB_B.diag().unsqueeze(1).expand(-1, N)
1862
+
1863
+ # pdb.set_trace()
1864
+
1865
+ # Compare each off-diagonal element to its row's diagonal: True means the corresponding (diagonal) similarity wins that comparison, which is what non_corr_score counts below
1866
+ off_diag_mask_device = off_diag_mask.to(device)
1867
+
1868
+ fail_AA = (simA_A < diagA_A) & off_diag_mask_device
1869
+ fail_AB = (simA_B < diagA_A) & off_diag_mask_device
1870
+ fail_BB = (simB_B < diagB_B) & off_diag_mask_device
1871
+ fail_BA = (simB_A < diagB_B) & off_diag_mask_device
1872
+
1873
+ non_corr_score += fail_AA.sum().item()
1874
+ non_corr_score += fail_AB.sum().item()
1875
+ non_corr_score += fail_BB.sum().item()
1876
+ non_corr_score += fail_BA.sum().item()
1877
+
1878
+ # Log failed indices
1879
+ fail_sources = [fail_AA, fail_AB, fail_BB, fail_BA]
1880
+ for fail_matrix, label in zip(fail_sources, ["AA", "AB", "BB", "BA"]):
1881
+ fail_coords = torch.nonzero(fail_matrix, as_tuple=False).cpu().numpy()
1882
+ for i, j in fail_coords:
1883
+ failed_non_corr.append({"type": label, "i": i, "j": j, "pair_i": mst_pairs[i], "pair_j": mst_pairs[j]})
1884
+
1885
+ return corr_score, corr_total, int(non_corr_score), non_corr_total, failed_A, failed_B, failed_non_corr
1886
+
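# Worked check of the denominators above: for N pairs there are 2*N corresponding comparisons,
# and each of the four similarity matrices contributes N*(N-1) off-diagonal entries, so
# non-corresponding comparisons total 4*N*(N-1) (e.g. N=31 gives 62 and 3720).
_N = len(pairs)
print("corresponding comparisons:", 2 * _N, "| non-corresponding comparisons:", 4 * _N * (_N - 1))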
1887
+
1888
+ # In[ ]:
1889
+
1890
+
1891
+ all_scores = []
1892
+ all_failures = []
1893
+
1894
+ for i in range(4):
1895
+ for j in range(4):
1896
+ mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) # shape (31, 2)
1897
+ corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs)
1898
+
1899
+ # Store scores and failure info together
1900
+ all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total))
1901
+ all_failures.append({
1902
+ "repeat_A": i,
1903
+ "repeat_B": j,
1904
+ "failed_A": failed_A,
1905
+ "failed_B": failed_B,
1906
+ "failed_non_corr": failed_non_corr,
1907
+ "mst_pairs": mst_pairs,
1908
+ })
1909
+
1910
+ # Print summary
1911
+ print(f"pairmate A repeat {i} vs pairmate B repeat {j}:")
1912
+ print(f"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})")
1913
+ print(f"2-AFC non-corresponding = {non_corr_score}/{non_corr_total} ({non_corr_score/non_corr_total:.2%})")
1914
+ print("")
1915
+
1916
+
1917
+ # In[ ]:
1918
+
1919
+
1920
+ all_scores = np.array(all_scores)
1921
+ print(f"average 2-AFC corresponding: {all_scores[:,0].mean():.2f}/{all_scores[:,1].mean():.2f} ({(all_scores[:,0].sum()/all_scores[:,1].sum()):.2%})")
1922
+ print(f"average 2-AFC non-corresponding: {all_scores[:,2].mean():.2f}/{all_scores[:,3].mean():.2f} ({(all_scores[:,2].sum()/all_scores[:,3].sum()):.2%})")
1923
+ print(f'chance = 1/{corr_total} ({(1/corr_total):.2%})')
1924
+
1925
+
1926
+ # In[ ]:
1927
+
1928
+
1929
+ from collections import defaultdict
1930
+
1931
+ # Map from image index to failure details
1932
+ failed_images = defaultdict(list)
1933
+
1934
+ for failure_entry in all_failures:
1935
+ mst_pairs = failure_entry["mst_pairs"]
1936
+ i, j = failure_entry["repeat_A"], failure_entry["repeat_B"]
1937
+
1938
+ # A-side failures
1939
+ for fail_idx in failure_entry["failed_A"]:
1940
+ image_idx = mst_pairs[fail_idx][0]
1941
+ pairmate_idx = mst_pairs[fail_idx][1]
1942
+ failed_images[image_idx].append({
1943
+ "repeat_A": i,
1944
+ "repeat_B": j,
1945
+ "pairmate": pairmate_idx,
1946
+ "type": "A",
1947
+ })
1948
+
1949
+ # B-side failures
1950
+ for fail_idx in failure_entry["failed_B"]:
1951
+ image_idx = mst_pairs[fail_idx][1]
1952
+ pairmate_idx = mst_pairs[fail_idx][0]
1953
+ failed_images[image_idx].append({
1954
+ "repeat_A": i,
1955
+ "repeat_B": j,
1956
+ "pairmate": pairmate_idx,
1957
+ "type": "B",
1958
+ })
1959
+
1960
+
1961
+ # In[ ]:
1962
+
1963
+
1964
+ # import matplotlib.pyplot as plt
1965
+
1966
+ # for img_idx, failure_list in failed_images.items():
1967
+ # print(f"\n==== Failed Image {img_idx} ====")
1968
+
1969
+ # # Load and normalize the embeddings
1970
+ # image = images[img_idx].unsqueeze(0).to(device).float()
1971
+ # image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)
1972
+
1973
+ # # Get voxel→CLIP embedding
1974
+ # voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)
1975
+ # voxel_embed = model.backbone(model.ridge(voxel, 0))[1]
1976
+ # voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)
1977
+
1978
+ # # Display original image
1979
+ # print("Original image:")
1980
+ # display(utils.torch_to_Image(images[img_idx]))
1981
+
1982
+ # # Collect unique pairmates involved in the failure
1983
+ # pairmate_indices = list(set(entry["pairmate"] for entry in failure_list))
1984
+
1985
+ # # Plot failed pairmates with similarity annotations
1986
+ # fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
1987
+ # if len(pairmate_indices) == 1:
1988
+ # axs = [axs]
1989
+
1990
+ # # Compute "correct" similarity — voxel to its own CLIP embedding
1991
+ # correct_clip = image_clip.float()
1992
+ # correct_voxel_sim = (voxel_embed.float() @ correct_clip.T).item()
1993
+ # print(f"Correct voxel→CLIP similarity = {correct_voxel_sim:.4f}")
1994
+
1995
+ # # Plot failed pairmates with similarity annotations
1996
+ # fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
1997
+ # if len(pairmate_indices) == 1:
1998
+ # axs = [axs]
1999
+
2000
+ # for ax, mate_idx in zip(axs, pairmate_indices):
2001
+ # mate_image = images[mate_idx].unsqueeze(0).to(device).float()
2002
+ # mate_clip = nn.functional.normalize(clip_img_embedder(mate_image).flatten(1), dim=-1).float()
2003
+
2004
+ # # Similarities
2005
+ # clip_sim = (correct_clip @ mate_clip.T).item()
2006
+ # voxel_sim = (voxel_embed.float() @ mate_clip.T).item()
2007
+
2008
+ # # Check if this was the mistaken "higher" match
2009
+ # wrong_match = voxel_sim > correct_voxel_sim
2010
+
2011
+ # # Plot image and annotate
2012
+ # ax.imshow(utils.torch_to_Image(images[mate_idx]))
2013
+ # ax.axis("off")
2014
+ # ax.set_title(f"Pairmate {mate_idx}\nCLIP={clip_sim:.3f}\nVoxel={voxel_sim:.3f}\n{'← WRONG' if wrong_match else ''}",
2015
+ # color="red" if wrong_match else "black")
2016
+
2017
+
2018
+ # plt.tight_layout()
2019
+ # plt.show()
2020
+
2021
+
2022
+ # In[ ]:
2023
+
2024
+
2025
+ # comp[20,18] is the only False
2026
+
2027
+
2028
+ # In[ ]:
2029
+
2030
+
2031
+ # import matplotlib.pyplot as plt
2032
+
2033
+ # for img_idx, failure_list in failed_images.items():
2034
+ # print(f"\n==== Failed Image {img_idx} ====")
2035
+
2036
+ # # Load and normalize the embeddings
2037
+ # image = images[img_idx].unsqueeze(0).to(device).float()
2038
+ # image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)
2039
+
2040
+ # # Get voxel→CLIP embedding
2041
+ # voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)
2042
+ # voxel_embed = model.backbone(model.ridge(voxel, 0))[1]
2043
+ # voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)
2044
+
2045
+ # # Display original image
2046
+ # print("Original image:")
2047
+ # display(utils.torch_to_Image(images[img_idx]))
2048
+
2049
+ # # Collect unique pairmates involved in the failure
2050
+ # pairmate_indices = list(set(entry["pairmate"] for entry in failure_list))
2051
+
2052
+ # # Plot failed pairmates with similarity annotations
2053
+ # fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
2054
+ # if len(pairmate_indices) == 1:
2055
+ # axs = [axs]
2056
+
2057
+ # for ax, mate_idx in zip(axs, pairmate_indices):
2058
+ # # Get all CLIP embeddings for failed image and pairmates
2059
+ # all_indices = [img_idx] + pairmate_indices
2060
+ # all_images = images[all_indices].to(device).float()
2061
+ # all_clip_embeds = clip_img_embedder(all_images)
2062
+ # all_clip_embeds = nn.functional.normalize(all_clip_embeds.flatten(1), dim=-1).float()
2063
+
2064
+ # # Compare voxel embedding for the failed image to all CLIP embeddings
2065
+ # sims = (voxel_embed.float() @ all_clip_embeds.T).squeeze().cpu().detach().numpy() # shape: (1, N) → (N,)
2066
+ # image_ids = ["correct"] + [f"pairmate {idx}" for idx in pairmate_indices]
2067
+
2068
+ # # Sort and display
2069
+ # sorted_sims = sorted(zip(image_ids, all_indices, sims), key=lambda x: -x[2])
2070
+
2071
+ # print("\n🧠 Voxel→CLIP similarity ranking:")
2072
+ # for label, idx, sim in sorted_sims:
2073
+ # print(f"{label:12} (index {idx:3}): similarity = {sim:.4f}")
2074
+
2075
+ # # Optional assertion: did any pairmate score higher than the correct image?
2076
+ # correct_sim = sims[0]
2077
+ # higher = [(label, sim) for label, _, sim in sorted_sims[1:] if sim > correct_sim]
2078
+ # if higher:
2079
+ # print("\n❌ Mismatch detected: voxel embedding matched other images more than the correct one!")
2080
+ # else:
2081
+ # print("\n✅ Model correctly ranked the correct image highest (despite failure elsewhere)")
2082
+
2083
+ # plt.tight_layout()
2084
+ # plt.show()
2085
+
2086
+
2087
+ # In[ ]:
2088
+
2089
+
2090
+ mst_pairs[:5]
2091
+
2092
+
2093
+ # In[ ]:
2094
+
2095
+
2096
+ pairs[0]
2097
+
2098
+
2099
+ # In[ ]:
2100
+
2101
+
2102
+ # images[image_idx[pairs[0][0]]].shape
2103
+
2104
+
2105
+ # In[ ]:
2106
+
2107
+
2108
+ ix = 0
2109
+ display(utils.torch_to_Image(images[pairs[ix][0]]))
2110
+ display(utils.torch_to_Image(images[pairs[ix][1]]))
2111
+
2112
+
2113
+ # In[ ]:
2114
+
2115
+
2116
+ # print(np.allclose(embed_A[0], embed_A[1])) # across repeats
2117
+ # print(np.allclose(embed_A[0], embed_B[0])) # across pairmates
2118
+
2119
+
2120
+ # In[ ]:
2121
+
2122
+
2123
+ # def generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=2):
2124
+ # n_imgs, n_pairmates, n_repeats = pairs.shape
2125
+ # nonmatch_pairs = []
2126
+
2127
+ # for i in range(n_imgs):
2128
+ # other_idxs = [j for j in range(n_imgs) if j != i]
2129
+ # sampled_j = np.random.choice(other_idxs, size=num_images_per_source, replace=False)
2130
+
2131
+ # for j in sampled_j:
2132
+ # for _ in range(num_repeats):
2133
+ # a_side = np.random.randint(2)
2134
+ # b_side = np.random.randint(2)
2135
+ # a_repeat = np.random.randint(n_repeats)
2136
+ # b_repeat = np.random.randint(n_repeats)
2137
+
2138
+ # pair_a = pairs[i, a_side, a_repeat]
2139
+ # pair_b = pairs[j, b_side, b_repeat]
2140
+ # nonmatch_pairs.append([pair_a, pair_b])
2141
+
2142
+ # return np.array(nonmatch_pairs)
2143
+
2144
+
2145
+ # In[ ]:
2146
+
2147
+
2148
+ # nonmatch_pairs = generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=1)
2149
+ # results = evaluate_mst_pairs(nonmatch_pairs)
2150
+ # print(results)
2151
+
2152
+
2153
+ # In[ ]:
2154
+
2155
+
2156
+ # # Compare first few pairs
2157
+ # for pair in pairs: # Checking first 2 pairs
2158
+ # print("Indices in mst_pairs:", pair)
2159
+ # print("Corresponding filenames:")
2160
+ # print(f"Image 1: {x[pair[0]]}")
2161
+ # print(f"Image 2: {x[pair[1]]}\n")
2162
+
2163
+
2164
+ # In[ ]:
2165
+
2166
+
2167
+ # for i in range(len(pairs)):
2168
+ # fig, ax = plt.subplots(1, 2, figsize=(10,8))
2169
+
2170
+ # ax[0].imshow(images[pairs[i][0]].permute(1,2,0).numpy())
2171
+ # ax[0].set_title(f"Repeat 1")
2172
+
2173
+ # ax[1].imshow(images[pairs[i][1]].permute(1,2,0).numpy())
2174
+ # ax[1].set_title(f"Repeat 2")
2175
+
2176
+ # plt.setp(ax, xticks=[], yticks=[])
2177
+ # plt.tight_layout()
2178
+ # plt.show()
2179
+
2180
+
2181
+ # In[ ]:
2182
+
2183
+
2184
+ # score = 0
2185
+ # total = 0
2186
+ # with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
2187
+ # for pair in unique_images_pairs:
2188
+ # imageA_idx, imageB_idx = pair
2189
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
2190
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
2191
+
2192
+ # voxel = vox[imageA_idx].to(device)[None]
2193
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
2194
+
2195
+ # imageA = images[imageA_idx].to(device)[None]
2196
+ # imageB = images[imageB_idx].to(device)[None]
2197
+
2198
+ # clip_targetA = clip_img_embedder(imageA.float())
2199
+ # clip_targetB = clip_img_embedder(imageB.float())
2200
+
2201
+ # voxel_ridge = model.ridge(voxel,0)
2202
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
2203
+
2204
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
2205
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
2206
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
2207
+
2208
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
2209
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
2210
+
2211
+ # if cossimA > cossimB:
2212
+ # score += 1
2213
+ # total += 1
2214
+
2215
+ # for pair in unique_images_pairs:
2216
+ # imageA_idx, imageB_idx = pair
2217
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
2218
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
2219
+
2220
+ # voxel = vox[imageB_idx].to(device)[None]
2221
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
2222
+
2223
+ # imageA = images[imageA_idx].to(device)[None]
2224
+ # imageB = images[imageB_idx].to(device)[None]
2225
+
2226
+ # clip_targetA = clip_img_embedder(imageA.float())
2227
+ # clip_targetB = clip_img_embedder(imageB.float())
2228
+
2229
+ # voxel_ridge = model.ridge(voxel,0)
2230
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
2231
+
2232
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
2233
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
2234
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
2235
+
2236
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
2237
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
2238
+
2239
+ # if cossimB > cossimA:
2240
+ # score += 1
2241
+ # total += 1
2242
+
2243
+ # print(score/total)
2244
+
2245
+
2246
+ # In[ ]:
2247
+
2248
+
2249
+ #display(utils.torch_to_Image(imageA))
2250
+ #display(utils.torch_to_Image(imageB))
2251
+
2252
+
2253
+ # In[ ]:
2254
+
2255
+
2256
+ # from scipy.stats import binomtest
2257
+
2258
+ # total_samples = len(np.array(unique_images_pairs).flatten())
2259
+ # assert total_samples == 100
2260
+
2261
+ # correct_predictions = int((score/total) * total_samples) # calculate the number of correct predictions
2262
+ # expected_accuracy = 0.5 # expected accuracy under the null hypothesis
2263
+
2264
+ # # Perform the binomial test
2265
+ # binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')
2266
+ # p_value = binom_stats.pvalue
2267
+
2268
+ # # Output the result
2269
+ # print(f"P-value: {p_value}")
2270
+ # if p_value < 0.05:
2271
+ # print("The decoder's accuracy is significantly better than chance.")
2272
+ # else:
2273
+ # print("The decoder's accuracy is not significantly better than chance.")
2274
+
2275
+
2276
+ # In[ ]:
2277
+
2278
+
2279
+
2280
+
main-multisession-3tasks.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
main-multisession-3tasks.py ADDED
@@ -0,0 +1,2269 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Import packages & functions
5
+
6
+ # In[1]:
7
+
8
+
9
+ print("importing modules")
10
+ import os
11
+ import sys
12
+ import json
13
+ import argparse
14
+ import numpy as np
15
+ import time
16
+ import random
17
+ import string
18
+ import h5py
19
+ from tqdm import tqdm
20
+ import webdataset as wds
21
+ from PIL import Image
22
+ import pandas as pd
23
+ import nibabel as nib
24
+ import nilearn
25
+
26
+ import matplotlib.pyplot as plt
27
+ import torch
28
+ import torch.nn as nn
29
+ from torchvision import transforms
30
+
31
+ # tf32 data type is faster than standard float32
32
+ torch.backends.cuda.matmul.allow_tf32 = True
33
+
34
+ import utils
35
+ from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas
36
+
37
+ # imports utils from mindeye_preproc as "preproc"
38
+ import importlib.util
39
+ parent_utils_path = "/home/ri4541/mindeye_preproc/analysis/utils.py"
40
+ spec = importlib.util.spec_from_file_location("utils", parent_utils_path)
41
+ preproc = importlib.util.module_from_spec(spec)
42
+ parent_dir = os.path.dirname(parent_utils_path)
43
+ if parent_dir not in sys.path:
44
+ sys.path.append(parent_dir)
45
+ spec.loader.exec_module(preproc)
46
+
47
+ if utils.is_interactive():
48
+ from IPython.display import clear_output # function to clear print outputs in cell
49
+ get_ipython().run_line_magic('load_ext', 'autoreload')
50
+ # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions
51
+ get_ipython().run_line_magic('autoreload', '2')
52
+
53
+ seed = utils.get_slurm_seed()
54
+
55
+
56
+ # # Princeton data prep
57
+
58
+ # ## Load Data & Design
59
+
60
+ # In[67]:
61
+
62
+
63
+ def get_flag(name, default, cast=int):
64
+ """Retrieve a flag from environment variables or return a default value."""
65
+ if utils.is_interactive():
66
+ return default
67
+ val = os.environ.get(name.upper(), str(default))
68
+ print(f"Retrieved {name.upper()} from environment: {val}")
69
+
70
+ if cast == bool:
71
+ # Explicitly handle string conversion to boolean
72
+ if val.lower() in ['true', '1']:
73
+ return True
74
+ elif val.lower() in ['false', '0']:
75
+ return False
76
+ else:
77
+ return bool(val) # Fallback to default casting behavior
78
+
79
+ try:
80
+ return cast(val)
81
+ except Exception:
82
+ return val
83
+
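+ # Example of how these flags resolve in a non-interactive (SLURM) run, with hypothetical environment values:
+ # with SESSION=all and RESAMPLE_VOXEL_SIZE=0 exported, get_flag('SESSION', 'ses-04', cast=str) returns 'all'
+ # and get_flag('RESAMPLE_VOXEL_SIZE', True, cast=bool) returns False; unset variables fall back to their
+ # defaults (e.g. get_flag('RESAMPLED_VOX_SIZE', 2.0, cast=float) returns 2.0).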
84
+ if not utils.is_interactive():
85
+ print('running non-interactively')
86
+
87
+ # Define variables using get_flag
88
+ sub = get_flag('SUB', 'sub-005', cast=str)
89
+ session = get_flag('SESSION', 'ses-04', cast=str) # 'ses-xx', 'all'
90
+ task = get_flag('TASK', 'B', cast=str) # 'study', 'A', or 'B'
91
+ func_task_name = get_flag('FUNC_TASK_NAME', 'B', cast=str)
92
+
93
+ if session == "all":
94
+ ses_list = ["ses-01", "ses-02"] # list of actual session IDs
95
+ design_ses_list = ["ses-01", "ses-02"] # list of session IDs to search for design matrix
96
+ else:
97
+ ses_list = [session]
98
+ design_ses_list = [session]
99
+
100
+ task_name = f"_task-{task}" if task != 'study' else ''
101
+ resample_voxel_size = get_flag('RESAMPLE_VOXEL_SIZE', True, cast=bool)
102
+ resample_post_glmsingle = get_flag('RESAMPLE_POST_GLMSINGLE', False, cast=bool)
103
+ load_from_resampled_file = get_flag('LOAD_FROM_RESAMPLED_FILE', False, cast=bool)
104
+
105
+ train_test_split = get_flag('TRAIN_TEST_SPLIT', 'MST', cast=str)
106
+ remove_close_to_MST = get_flag('REMOVE_CLOSE_TO_MST', False, cast=bool)
107
+ remove_random_n = get_flag('REMOVE_RANDOM_N', False, cast=bool)
108
+ if remove_close_to_MST or remove_random_n:
109
+ assert remove_close_to_MST != remove_random_n # don't remove both sets of images
110
+
111
+ n_to_remove = 0
112
+ if remove_random_n:
113
+ assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set
114
+ n_to_remove = 150
115
+
116
+ if resample_voxel_size:
117
+ resampled_vox_size = get_flag('RESAMPLED_VOX_SIZE', 2.0, cast=float)
118
+ resample_method = get_flag('RESAMPLE_METHOD', 'trilinear', cast=str)
119
+
120
+ vox_dim_str = str(resampled_vox_size).replace('.', '_')
121
+ resampled_suffix = f"resampled_{vox_dim_str}mm_{resample_method}"
122
+ mask_resampled_suffix = resampled_suffix
123
+ if resample_post_glmsingle:
124
+ resampled_suffix += '_postglmsingle'
125
+
126
+ print('resample_voxel_size:', resample_voxel_size)
127
+ print('resample_post_glmsingle:', resample_post_glmsingle)
128
+ print('load_from_resampled_file:', load_from_resampled_file)
129
+ print('resampled_vox_size:', resampled_vox_size)
130
+ print('resample_method:', resample_method)
131
+ print('resampled_suffix:', resampled_suffix)
132
+ print('mask_resampled_suffix:', mask_resampled_suffix)
133
+ print('sub:', sub)
134
+ print('session:', session)
135
+ print('task:', task)
136
+ print('func_task_name:', func_task_name)
137
+ print('ses_list:', ses_list)
138
+ print('design_ses_list:', design_ses_list)
139
+ print('task_name:', task_name)
140
+ print('train_test_split:', train_test_split)
141
+ print('remove_close_to_MST:', remove_close_to_MST)
142
+ print('remove_random_n:', remove_random_n)
143
+ print('n_to_remove:', n_to_remove)
144
+
145
+
146
+ # In[7]:
147
+
148
+
149
+ session_label = preproc.get_session_label(ses_list)
150
+ print('session label:', session_label)
151
+ n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)
152
+
153
+
154
+ # In[8]:
155
+
156
+
157
+ if utils.is_interactive():
158
+ glmsingle_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}"
159
+ else:
160
+ glmsingle_path = os.environ["glmsingle_path"]
161
+
162
+ designdir = "/home/ri4541/real_time_mindEye2"
163
+ print(glmsingle_path)
164
+
165
+ if resample_voxel_size:
166
+ # option 1: we are using original (non-resampled) GLMsingle outputs and doing the resampling here
167
+ # option 2: doing resampling pre-GLMsingle and using those outputs; no resampling involved here
168
+ if resample_post_glmsingle:
169
+ # option 1
170
+ orig_glmsingle_path = glmsingle_path
171
+ glmsingle_path += f"_{resampled_suffix}"
172
+ print("resampled glmsingle path:", glmsingle_path)
173
+ if load_from_resampled_file:
174
+ # resampling is already done; load from file
175
+ assert os.path.exists(glmsingle_path) # the new directory must have been created if we reached here
176
+ else:
177
+ # don't load from file; do resampling here
178
+ os.makedirs(glmsingle_path,exist_ok=True)
179
+ else:
180
+ # option 2
181
+ glmsingle_path += f"_{resampled_suffix}"
182
+ print("glmsingle path:", glmsingle_path)
183
+
184
+ assert os.path.exists(glmsingle_path)
185
+ print("glmsingle path exists!")
186
+
187
+
188
+ # In[9]:
189
+
190
+
191
+ data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files(
192
+ sub=sub,
193
+ session=session,
194
+ func_task_name=task,
195
+ designdir=designdir,
196
+ design_ses_list=design_ses_list
197
+ )
198
+
199
+ if sub == 'sub-001':
200
+ if session == 'ses-01':
201
+ assert image_names[0] == 'images/image_686_seed_1.png'
202
+ elif session in ('ses-02', 'all'):
203
+ assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'
204
+ elif session == 'ses-03':
205
+ assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'
206
+ elif session == 'ses-04':
207
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
208
+ elif sub == 'sub-003':
209
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
210
+
211
+ unique_images = np.unique(image_names.astype(str))
212
+ unique_images = unique_images[(unique_images!="nan")]
213
+ len_unique_images = len(unique_images)
214
+ print("n_runs",n_runs)
215
+
216
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
217
+ assert len(unique_images) == 851
218
+
219
+ print(image_names[:4])
220
+ print(starts[:4])
221
+ print(is_new_run[:4])
222
+
223
+ if remove_random_n:
224
+ # want to remove 150 imgs
225
+ # 100 special515 imgs are repeated 3x (300 total)
226
+ # all other train imgs are only shown once (558 total)
227
+ # of the 150, want to sample proportionally since we're cutting all repeats for special515
228
+ # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total
229
+ np.random.seed(seed)
230
+ options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)
231
+ options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)
232
+ imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False)
233
+ imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))
234
+
235
+ image_idx = np.array([]) # contains the unique index of each presented image
236
+ vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx
237
+ all_MST_images = dict()
238
+ for i, im in enumerate(image_names):
239
+ # skip if blank, nan
240
+ if im == "blank.jpg":
241
+ i+=1
242
+ continue
243
+ if str(im) == "nan":
244
+ i+=1
245
+ continue
246
+ vox_image_names = np.append(vox_image_names, im)
247
+ if remove_close_to_MST: # optionally skip close_to_MST images
248
+ if "closest_pairs" in im:
249
+ i+=1
250
+ continue
251
+ elif remove_random_n:
252
+ if im in imgs_to_remove:
253
+ i+=1
254
+ continue
255
+
256
+ image_idx_ = np.where(im==unique_images)[0].item()
257
+ image_idx = np.append(image_idx, image_idx_)
258
+
259
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles
260
+ import re
261
+ if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im) or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)):
262
+             # the regexp here looks for **_***.png, allowing 1-2 digits before the underscore and 1-3 digits after it
263
+ # print(im)
264
+ all_MST_images[i] = im
265
+ i+=1
266
+ elif 'MST' in im:
267
+ all_MST_images[i] = im
268
+ i+=1
269
+
270
+ image_idx = torch.Tensor(image_idx).long()
271
+ # for im in new_image_names[MST_images]:
272
+ # assert 'MST_pairs' in im
273
+ # assert len(all_MST_images) == 300
274
+
275
+ unique_MST_images = np.unique(list(all_MST_images.values()))
276
+
277
+ MST_ID = np.array([], dtype=int)
278
+ if remove_close_to_MST:
279
+ close_to_MST_idx = np.array([], dtype=int)
280
+ if remove_random_n:
281
+ random_n_idx = np.array([], dtype=int)
282
+
283
+ vox_idx = np.array([], dtype=int)
284
+ j=0 # this is a counter keeping track of the remove_random_n used later to index vox based on the removed images; unused otherwise
285
+ j=0 # counter over non-blank/non-nan trials; when remove_random_n is enabled, vox_idx records these positions for the removed images so they can be dropped from vox later; unused otherwise
286
+ # skip if blank, nan
287
+ if im == "blank.jpg":
288
+ i+=1
289
+ continue
290
+ if str(im) == "nan":
291
+ i+=1
292
+ continue
293
+ if remove_close_to_MST: # optionally skip close_to_MST images
294
+ if "closest_pairs" in im:
295
+ close_to_MST_idx = np.append(close_to_MST_idx, i)
296
+ i+=1
297
+ continue
298
+ if remove_random_n:
299
+ if im in imgs_to_remove:
300
+ vox_idx = np.append(vox_idx, j)
301
+ i+=1
302
+ j+=1
303
+ continue
304
+ j+=1
305
+ curr = np.where(im == unique_MST_images)
306
+ # print(curr)
307
+ if curr[0].size == 0:
308
+ MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later
309
+ else:
310
+ MST_ID = np.append(MST_ID, curr)
311
+
312
+ assert len(MST_ID) == len(image_idx)
313
+ # assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)
314
+ # MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor
315
+ print(MST_ID.shape)
316
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
317
+ assert len(all_MST_images) == 100
318
+
319
+
320
+ # ## Load images
321
+
322
+ # In[10]:
323
+
324
+
325
+ import imageio.v2 as imageio
326
+ resize_transform = transforms.Resize((224, 224))
327
+ MST_images = []
328
+ images = None
329
+ for im_name in tqdm(image_idx):
330
+ if sub == 'sub-001' and session == 'ses-01':
331
+ image_file = f"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}"
332
+ else:
333
+ image_file = f"{unique_images[im_name]}"
334
+ im = imageio.imread(image_file)
335
+ im = torch.Tensor(im / 255).permute(2,0,1)
336
+ im = resize_transform(im.unsqueeze(0))
337
+ if images is None:
338
+ images = im
339
+ else:
340
+ images = torch.vstack((images, im))
341
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
342
+ if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\d{1,2}_\d{1,3}\.png$', image_file)):
343
+ MST_images.append(True)
344
+ else:
345
+ MST_images.append(False)
346
+ else:
347
+ if ("MST_pairs" in image_file): # ("_seed_" not in unique_images[im_name]) and (unique_images[im_name] != "blank.jpg")
348
+ MST_images.append(True)
349
+ else:
350
+ MST_images.append(False)
351
+
352
+ print("images", images.shape)
353
+ MST_images = np.array(MST_images)
354
+ print("MST_images", len(MST_images))
355
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
356
+ assert len(MST_images[MST_images==True]) == 100
357
+ print("MST_images==True", len(MST_images[MST_images==True]))
358
+
359
+
360
+ # In[11]:
361
+
362
+
363
+ # want IDs of pairmates based on MST_images
364
+ # create "MST_pairmates" which is a 25x2 array with indices of the 25 pairs based on MST_images == True
365
+
366
+ assert unique_MST_images.shape[0] % 2 == 0 # make sure it's divisible by 2
367
+ MST_pairmate_names = unique_MST_images.reshape(int(unique_MST_images.shape[0]/2),2)
368
+ # print(MST_pairmate_names)
369
+
370
+ MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int)
371
+ for p, pair in enumerate(MST_pairmate_names):
372
+ for i, im in enumerate(pair):
373
+ MST_pairmate_indices[p][i] = np.where(np.isin(list(all_MST_images.values()), im))[0][0] # just take the first repeated instance of an image
374
+
375
+ print(MST_pairmate_indices.shape, MST_pairmate_indices)
376
+
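+ # Note: the reshape above assumes that np.unique's sorted order places the two pairmates of each MST
+ # pair on adjacent rows (their filenames differ only in a trailing pairmate number), so that each row
+ # of MST_pairmate_names is one true pair.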
377
+
378
+ # In[12]:
379
+
380
+
381
+ if (sub == 'sub-001' and session in ('ses-02', 'ses-03', 'all')):
382
+ # MST_pairs contains the indices of repeats based on all_MST_images
383
+ # all_MST_images contains the indices of images from image_names
384
+ MST_pairs = utils.find_paired_indices(torch.tensor(MST_ID))
385
+ MST_pairs = np.array(sorted(MST_pairs[:-1], key=lambda x: x[0])) # we added a fake value as a placeholder so index out the last group of pairs
386
+
387
+ # assert images[MST_pairs]
388
+
389
+ fig, ax = plt.subplots(1, 3, figsize=(10,4))
390
+ fig.suptitle('Sample MST pairs')
391
+
392
+ ax[0].imshow(images[MST_pairs[-1][0]].permute(1,2,0).numpy())
393
+ ax[0].set_title(f"Trial 0")
394
+
395
+ ax[1].imshow(images[MST_pairs[-1][1]].permute(1,2,0).numpy())
396
+ ax[1].set_title(f"Trial 1")
397
+
398
+ ax[2].imshow(images[MST_pairs[-1][2]].permute(1,2,0).numpy())
399
+ ax[2].set_title(f"Trial 2")
400
+
401
+ plt.setp(ax, xticks=[], yticks=[])
402
+ plt.tight_layout()
403
+ plt.show()
404
+
405
+
406
+ # In[13]:
407
+
408
+
409
+ # pairs has the indices of all repeated images
410
+ pairs = utils.find_paired_indices(image_idx)
411
+ pairs = sorted(pairs, key=lambda x: x[0])
412
+
413
+ fig, axes = plt.subplots(1, 3, figsize=(6, 2)) # 1 row, 3 columns
414
+ for i, ax in enumerate(axes):
415
+ ax.imshow(images[i].permute(1, 2, 0).numpy())
416
+ ax.set_title(f"Trial {i}")
417
+ ax.axis("off") # Hide axes for better visualization
418
+
419
+ plt.tight_layout()
420
+ # output_path = os.path.join(output_dir, "trials_plot.png")
421
+ # plt.savefig(output_path, dpi=300) # Save figure
422
+ plt.show()
423
+
424
+
425
+ # In[14]:
426
+
427
+
428
+ p=0
429
+
430
+ # plot 2 repeats (anything in pairs should have 2 repeats, even if there's more)
431
+ fig, ax = plt.subplots(1, 2, figsize=(10,8))
432
+
433
+ ax[0].imshow(images[pairs[p][0]].permute(1,2,0).numpy())
434
+ ax[0].set_title(f"Repeat 1")
435
+
436
+ ax[1].imshow(images[pairs[p][1]].permute(1,2,0).numpy())
437
+ ax[1].set_title(f"Repeat 2")
438
+
439
+ plt.setp(ax, xticks=[], yticks=[])
440
+ plt.tight_layout()
441
+ plt.show()
442
+
443
+
444
+ # In[15]:
445
+
446
+
447
+ def get_image_pairs(sub, session, func_task_name, designdir):
448
+ """Loads design files and processes image pairs for a given session."""
449
+ _, _, _, _, image_names, unique_images, _ = preproc.load_design_files(
450
+ sub=sub,
451
+ session=session,
452
+ func_task_name=func_task_name,
453
+ designdir=designdir,
454
+ design_ses_list=[session] # Ensure it's a list
455
+ )
456
+ return utils.process_images(image_names, unique_images)
457
+
458
+
459
+ # In[16]:
460
+
461
+
462
+ from collections import defaultdict
463
+
464
+ all_dicts = []
465
+ for s_idx, s in enumerate(ses_list):
466
+ im, vo, _ = get_image_pairs(sub, s, func_task_name, designdir)
467
+ assert len(im) == len(vo)
468
+ all_dicts.append({k:v for k,v in enumerate(vo)})
469
+
470
+ # for the train set (ses-01-02 non-MST)
471
+ image_to_indices = defaultdict(lambda: [[] for _ in range(len(ses_list))])
472
+ for ses_idx, idx_to_name in enumerate(all_dicts):
473
+ for idx, name in idx_to_name.items():
474
+ image_to_indices[name][ses_idx].append(idx)
475
+
476
+ image_to_indices = dict(image_to_indices)
477
+
478
+ # for the test set (ses-03)
479
+ # test_image_to_indices = defaultdict(lambda: [[] for _ in range(len([ses_list[-1]]))])
480
+ # for ses_idx, idx_to_name in enumerate([all_dicts[-1]]):
481
+ # for idx, name in idx_to_name.items():
482
+ # test_image_to_indices[name][ses_idx].append(idx)
483
+
484
+ # test_image_to_indices = dict(test_image_to_indices)
485
+
486
+
487
+ # In[17]:
488
+
489
+
490
+ # train_pairs_list = []
491
+ # test_pairs_list = []
492
+
493
+ if sub == 'sub-005' and ses_list == ["ses-01", "ses-02"]:
494
+ for image, (ses0_indices, ses1_indices) in image_to_indices.items():
495
+ # Offset session 1 indices by 693
496
+ image_to_indices[image] = [ses0_indices, [i + 693 for i in ses1_indices]]
497
+
498
+ # # Combine all repeat indices (across both sessions)
499
+ # all_indices = ses0_indices + ses1_indices_offset
500
+
501
+ # # Only include if there are at least 2 repeats
502
+ # if len(all_indices) >= 2:
503
+ # train_pairs_list.append(all_indices)
504
+
505
+ # for i in test_image_to_indices.values():
506
+ # # print(i[0])
507
+ # # Only include if there are at least 2 repeats
508
+ # if len(i[0]) >= 2:
509
+ # test_pairs_list.append(i[0])
510
+
511
+ # train_test_pairs = [train_pairs_list, test_pairs_list]
512
+
513
+ # elif sub == 'sub-005' and ses_list == ["ses-01", "ses-03"]:
514
+ # pairs_list = []
515
+
516
+ # if len(ses_list) > 2:
517
+ # # Case 1: Aggregate results from multiple sessions (ses_list[:-1]), concatenating into a single list
518
+ # combined_pairs = sum([get_image_pairs(sub, s, func_task_name, designdir) for s in ses_list[:-1]], [])
519
+ # pairs_list.append(combined_pairs)
520
+
521
+ # # Case 2: Process last session separately
522
+ # pairs_list.append(get_image_pairs(sub, ses_list[-1], func_task_name, designdir))
523
+
524
+ # else:
525
+ # # Case 3: Process both sessions individually if ses_list has only 2 entries
526
+ # pairs_list.extend([get_image_pairs(sub, s, func_task_name, designdir) for s in ses_list])
527
+
528
+ # assert len(pairs_list) == 2
529
+
530
+
531
+ # In[18]:
532
+
533
+
534
+ if resample_voxel_size:
535
+ from nilearn.masking import apply_mask, unmask
536
+ ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz'
537
+ omat_name = f'{glmsingle_path}/boldref_omat'
538
+
539
+
540
+ # In[19]:
541
+
542
+
543
+ from nilearn.plotting import plot_roi, plot_anat, plot_epi
544
+
545
+ mask_name = f'{glmsingle_path}/{sub}_{session_label}{task_name}_brain'
546
+ if resample_voxel_size:
547
+ if resample_post_glmsingle is True:
548
+ # use original mask directory
549
+ mask_in_name = f'{orig_glmsingle_path}/{sub}_{session}{task_name}_brain.nii.gz'
550
+ mask_out_name = mask_name + f"_{mask_resampled_suffix}.nii.gz"
551
+ assert os.path.exists(mask_in_name)
552
+ applyxfm(mask_in_name, ref_name, omat_name, resample_method, output=mask_out_name)
553
+         apply_thresh(mask_out_name, 0.5, output=mask_out_name) # binarize the mask, since resampling can produce values other than 0 or 1
554
+ mask_name += f"_{mask_resampled_suffix}"
555
+
556
+ mask_name += ".nii.gz"
557
+ print(mask_name)
558
+ avg_mask = nib.load(mask_name)
559
+ # mask info
560
+ dimsize=avg_mask.header.get_zooms()
561
+ affine_mat = avg_mask.affine
562
+ brain=avg_mask.get_fdata()
563
+ xyz=brain.shape #xyz dimensionality of brain mask and epi data
564
+
565
+ print('Mask dimensions:', dimsize)
566
+ print('')
567
+ print('Affine:')
568
+ print(affine_mat)
569
+ print('')
570
+ print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n')
571
+
572
+
573
+ # ## Load GLMSingle voxel data
574
+
575
+ # In[20]:
576
+
577
+
578
+ vox = None
579
+ needs_postprocessing = False
580
+ params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx)
581
+
582
+ if resample_post_glmsingle == True:
583
+ glm_save_path_resampled = f"{glmsingle_path}/vox_resampled.nii.gz"
584
+ if load_from_resampled_file == True:
585
+ # resampling was done in this notebook so we can load from file
586
+ vox = nib.load(glm_save_path_resampled)
587
+ else:
588
+ # do resampling here
589
+ assert os.path.exists(ref_name) and os.path.exists(omat_name), "need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs"
590
+ vox = load_preprocess_betas(orig_glmsingle_path, *params)
591
+ vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name)
592
+ needs_postprocessing = True
593
+
594
+ if vox is None:
595
+ # either resampling was done in glmsingle or we aren't resampling
596
+ vox = load_preprocess_betas(glmsingle_path, *params)
597
+
598
+ if needs_postprocessing == True:
599
+ vox = apply_mask(vox, avg_mask)
600
+ vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images)
601
+ print(vox.shape)
602
+
603
+ assert len(vox) == len(image_idx)
604
+
605
+
606
+ # ### Load nsdgeneral ROI
607
+
608
+ # In[28]:
609
+
610
+
611
+ if resample_voxel_size:
612
+ nsdgeneral_path = f'{glmsingle_path}/{sub}_{session_label}_task-{task}_nsdgeneral_{resampled_suffix}.nii.gz'
613
+ if resample_post_glmsingle:
614
+ assert os.path.exists(orig_glmsingle_path)
615
+ roi_in_path = f"{orig_glmsingle_path}/{sub}_{session_label}_task-{task}_nsdgeneral.nii.gz" # the input file is the original nsdgeneral mask (without resampling), from the original glmsingle directory
616
+ applyxfm(roi_in_path, ref_name, omat_name, resample_method, output=nsdgeneral_path)
617
+ else:
618
+ nsdgeneral_path = f'{glmsingle_path}/{sub}_{session_label}{task_name}_nsdgeneral.nii.gz'
619
+
620
+ print(nsdgeneral_path)
621
+ assert os.path.exists(nsdgeneral_path)
622
+ print(f"nsdgeneral path exists!")
623
+
624
+
625
+ # In[29]:
626
+
627
+
628
+ roi = nib.load(nsdgeneral_path)
629
+ print(roi.shape)
630
+ plot_roi(roi, bg_img=avg_mask)
631
+ plt.show()
632
+
633
+
634
+ # In[30]:
635
+
636
+
637
+ avg_mask = avg_mask.get_fdata().flatten()
638
+ print(f"total voxels (whole brain) = {int(avg_mask.sum())}")
639
+
640
+ roi = roi.get_fdata()
641
+ roi = roi.flatten()
642
+ roi = roi[avg_mask.astype(bool)]
643
+ roi[np.isnan(roi)] = 0
644
+ roi = roi.astype(bool)
645
+ print(f"nsdgeneral voxels = {roi.sum()}")
646
+
647
+
648
+ # ### ROI voxel exclusion
649
+
650
+ # In[31]:
651
+
652
+
653
+ # ROI masking?
654
+ print(f"vox before ROI exclusion: {vox.shape}")
655
+ vox = vox[:,roi]
656
+ print(f"vox after ROI exclusion: {vox.shape}")
657
+
658
+ if np.any(np.isnan(vox)):
659
+ print("NaNs found! Removing voxels...")
660
+ x,y = np.where(np.isnan(vox))
661
+ vox = vox[:,np.setdiff1d(np.arange(vox.shape[-1]), y)]
662
+
663
+
664
+ # ## Reliability calculation
665
+
666
+ # ### Calculate reliability (corr between first and second presentation of same image) for every voxel
667
+
668
+ # In[32]:
669
+
670
+
671
+ pairs_homog = np.array([[p[0], p[1]] for p in pairs])
672
+
673
+
674
+ # In[33]:
675
+
676
+
677
+ # vox_pairs = []
678
+ # for i in pairs:
679
+ # vox_pairs.append(utils.zscore(vox[i]))
680
+
681
+ vox_pairs = utils.zscore(vox[pairs_homog])
682
+ rels = np.full(vox.shape[-1],np.nan)
683
+ for v in tqdm(range(vox.shape[-1])):
684
+ rels[v] = np.corrcoef(vox_pairs[:,0,v], vox_pairs[:,1,v])[1,0]
685
+ # for v in tqdm(range(vox[0].shape[-1])):
686
+ # rep0 = []
687
+ # rep1 = []
688
+
689
+ # for vp in vox_pairs:
690
+ # rep0.append(vp[0, v])
691
+ # rep1.append(vp[1, v])
692
+
693
+ # rels[v] = np.corrcoef(rep0, rep1)[1, 0]
694
+
695
+ print("rels", rels.shape)
696
+ assert np.sum(np.all(np.isnan(rels))) == 0
697
+
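+ # Equivalent vectorized computation (sketch, assuming pairs_homog has shape (n_pairs, 2) and vox has
+ # shape (n_trials, n_voxels)):
+ # a = vox[pairs_homog[:, 0]]; b = vox[pairs_homog[:, 1]]
+ # a = (a - a.mean(0)) / a.std(0); b = (b - b.mean(0)) / b.std(0)
+ # rels_vec = (a * b).mean(0)  # per-voxel Pearson r between first and second presentations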
698
+
699
+ # ### Create representational similarity matrix
700
+
701
+ # In[34]:
702
+
703
+
704
+ # creating img x vox x repetitions matrix | shape=(150, 18419, 2)
705
+ vox0 = np.zeros((len(pairs_homog), vox.shape[-1], 2))
706
+ print(vox0.shape)
707
+ for ipair, pair in enumerate(tqdm(pairs_homog)):
708
+ pair = pair[:2] # to keep things consistent, just using the first two repeats
709
+ i,j = pair
710
+ vox0[ipair, :, :] = vox[pair].T
711
+ vox_avg = vox0.mean(-1) # average across the repetitions
712
+
713
+
714
+ # In[35]:
715
+
716
+
717
+ # Masking RDM for each reliability threshold
718
+ r_thresholds = np.array([.2])
719
+ rdm = np.zeros((len(r_thresholds), len(pairs), len(pairs)))
720
+ for ir_thresh, r_thresh in enumerate(r_thresholds):
721
+ print(f"reliability threshold = {r_thresh}")
722
+ for i in tqdm(range(len(pairs))):
723
+ for j in range(len(pairs)):
724
+ rdm[ir_thresh,i,j] = np.corrcoef(vox_avg[i,rels>r_thresh],
725
+ vox_avg[j,rels>r_thresh])[0,1]
726
+ # rdm has shape (len(r_thresholds), len(pairs), len(pairs)); with r_thresholds = [.2] that is (1, 150, 150) here
727
+
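+ # The nested loop above is equivalent to one np.corrcoef call per threshold (sketch):
+ # rdm[ir_thresh] = np.corrcoef(vox_avg[:, rels > r_thresh])
+ # since np.corrcoef on an (n_images, n_voxels) array returns the full image-by-image correlation matrix.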
728
+
729
+ # In[36]:
730
+
731
+
732
+ thresh = .2
733
+ plt.figure(figsize=(4,4))
734
+ plt.imshow(rdm[np.where(r_thresholds==thresh)[0].item()], clim=(-1,1))
735
+ plt.colorbar(shrink=0.8)
736
+ plt.title(f"{sub}_{session}\nreliability threshold={thresh}\n")
737
+ plt.show()
738
+
739
+
740
+ # In[37]:
741
+
742
+
743
+ for thresh in range(rdm.shape[0]):
744
+ for img in range(rdm.shape[1]):
745
+ assert np.isclose(rdm[thresh, img, img], 1)
746
+
747
+
748
+ # In[38]:
749
+
750
+
751
+ vox.shape
752
+
753
+
754
+ # In[39]:
755
+
756
+
757
+ # Reliability thresholding?
758
+ print(f"\nvox before reliability thresholding: {vox.shape}")
759
+ vox = vox[:,rels>.2]
760
+ print(f"\nvox after reliability thresholding: {vox.shape}")
761
+
762
+
763
+ # In[40]:
764
+
765
+
766
+ print(images.shape)
767
+ print(vox.shape)
768
+ assert len(images) == len(vox)
769
+
770
+
771
+ # In[41]:
772
+
773
+
774
+ same_corrs = []
775
+ diff_corrs = []
776
+ for isamp, samp in enumerate(vox[pairs_homog]):
777
+ avg_same_img = []
778
+ for i in range(samp.shape[0]):
779
+ for j in range(i, samp.shape[0]):
780
+ if i != j:
781
+ avg_same_img.append(np.array([np.corrcoef(samp[i, :], samp[j, :])[0,1]]))
782
+
783
+ same_corrs.append(np.mean(avg_same_img))
784
+
785
+ avg_diff_img = []
786
+ for isamp_j, samp_j in enumerate(vox[pairs_homog]):
787
+ if isamp_j != isamp:
788
+ for i in range(samp_j.shape[0]):
789
+ for j in range(i, samp_j.shape[0]):
790
+ if i != j:
791
+ avg_diff_img.append(np.array([np.corrcoef(samp[i, :], samp_j[j, :])[0,1]]))
792
+
793
+ # print(len(avg_diff_img))
794
+ diff_corrs.append(np.mean(avg_diff_img))
795
+
796
+
797
+ print(len(same_corrs), len(diff_corrs))
798
+ same_corrs = np.array(same_corrs)
799
+ diff_corrs = np.array(diff_corrs)
800
+
801
+
802
+ plt.figure(figsize=(5,4))
803
+ plt.title(f"{sub}_{session} same/diff Pearson corr.")
804
+ plt.plot(np.sort(same_corrs),c='blue',label='same')
805
+ plt.plot(np.sort(diff_corrs),c='cyan',label='diff')
806
+ plt.axhline(0,c='k',ls='--')
807
+ plt.legend()
808
+ plt.xlabel("sample")
809
+ plt.ylabel("Pearson R")
810
+ plt.show()
811
+
812
+
813
+ # In[42]:
814
+
815
+
816
+ vox_pairs = utils.zscore(vox[pairs_homog])
817
+ plt.figure(figsize=(5,4))
818
+ plt.title(f"{sub}_{session} same minus diff difference Pearson corr.")
819
+ plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference')
820
+ plt.axhline(0,c='k',ls='--')
821
+ plt.legend()
822
+ plt.xlabel("sample")
823
+ plt.ylabel("Pearson R")
824
+ plt.show()
825
+
826
+
827
+ # # Training MindEye
828
+
829
+ # In[43]:
830
+
831
+
832
+ utils.seed_everything(seed)
833
+
834
+ if train_test_split == 'orig':
835
+ # train = all images except images that were repeated
836
+ # test = average of the same-image presentations
837
+ imageTrain = np.arange(len(images))
838
+ train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])
839
+ test_image_indices = pairs
840
+ print(len(train_image_indices), len(test_image_indices))
841
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
842
+ elif train_test_split == 'MST':
843
+ # non-MST images are the train split
844
+ # MST images are the test split
845
+ MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k])
846
+ non_MST_idx = [v for k,v in image_to_indices.items() if 'MST_pairs' not in k]
847
+ non_MST_idx = np.array([z for y in non_MST_idx for x in y for z in x]) # flatten the indices
848
+ train_image_indices = non_MST_idx
849
+ test_image_indices = MST_idx.flatten() # MST_idx contains the mapping for the different test sets; test_image_indices has all MST indices combined
850
+ print(len(train_image_indices), len(test_image_indices))
851
+ assert len(train_image_indices) + len(test_image_indices) == len(vox)
852
+ elif train_test_split == 'unique':
853
+ imageTest = np.arange(len(images))
854
+ train_image_indices = pairs.flatten()
855
+ test_image_indices = np.array([item for item in imageTest if item not in pairs.flatten()])
856
+ print(len(train_image_indices), len(test_image_indices))
857
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
858
+ else:
859
+ raise Exception("invalid train_test_split")
860
+
861
+ # TODO add assertion that verifies file names in train and test don't overlap, guards against repeats
862
+
863
+ for i in train_image_indices:
864
+ assert i not in test_image_indices
865
+
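+ # One way to implement the TODO above (sketch; assumes no trials were removed, so that vox_image_names
+ # aligns row-for-row with vox):
+ # train_names = set(vox_image_names[train_image_indices])
+ # test_names = set(vox_image_names[test_image_indices])
+ # assert train_names.isdisjoint(test_names), "image names overlap between train and test"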
866
+
867
+ # In[44]:
868
+
869
+
870
+ train_mean = np.mean(vox[train_image_indices],axis=0)
871
+ train_std = np.std(vox[train_image_indices],axis=0)
872
+
873
+ vox = utils.zscore(vox,train_mean=train_mean,train_std=train_std)
874
+ print("voxels have been zscored")
875
+ print(vox[:,0].mean(), vox[:,0].std())
876
+ print("vox", vox.shape)
877
+
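+ # Note: only the training rows contribute to train_mean/train_std, so (assuming utils.zscore subtracts
+ # train_mean and divides by train_std) the held-out trials are normalized with training statistics and
+ # there is no leakage from the test split; the printed mean/std of column 0 over the full array will
+ # therefore be close to, but not exactly, 0 and 1.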
878
+
879
+ # In[45]:
880
+
881
+
882
+ # for idx in deleted_indices:
883
+ # # check image names to be deleted match
884
+ # original_name = vox_image_dict[idx]
885
+ # matching_indices = [i for i in deleted_indices if vox_image_dict[i] == original_name]
886
+ # assert all(vox_image_dict[i] == original_name for i in matching_indices), \
887
+ # f"Mismatch in image names for deleted indices {matching_indices}"
888
+
889
+ # # check image data to be deleted match
890
+ # base_image = images[matching_indices[0]] # Reference image
891
+ # for i in matching_indices[1:]:
892
+ # assert np.array_equal(base_image, images[i]), \
893
+ # f"Mismatch in image data for {vox_image_dict[i]} at index {i}"
894
+
895
+ # images = images[kept_indices]
896
+
897
+
898
+ # In[46]:
899
+
900
+
901
+ images = torch.Tensor(images)
902
+ vox = torch.Tensor(vox)
903
+ assert len(images) == len(vox)
904
+
905
+
906
+ # In[47]:
907
+
908
+
909
+ ### Multi-GPU config ###
910
+ from accelerate import Accelerator, DeepSpeedPlugin
911
+
912
+ local_rank = os.getenv('RANK')
913
+ if local_rank is None:
914
+ local_rank = 0
915
+ else:
916
+ local_rank = int(local_rank)
917
+ print("LOCAL RANK ", local_rank)
918
+
919
+ data_type = torch.float32 # change depending on your mixed_precision
920
+
921
+ accelerator = Accelerator(split_batches=False)
922
+ batch_size = 8
923
+
924
+
925
+ # In[48]:
926
+
927
+
928
+ print("PID of this process =",os.getpid())
929
+ device = accelerator.device
930
+ print("device:",device)
931
+ world_size = accelerator.state.num_processes
932
+ distributed = not accelerator.state.distributed_type == 'NO'
933
+ num_devices = torch.cuda.device_count()
934
+ global_batch_size = batch_size * num_devices
935
+ print("global_batch_size", global_batch_size)
936
+ if num_devices==0 or not distributed: num_devices = 1
937
+ num_workers = num_devices
938
+ print(accelerator.state)
939
+
940
+ # set data_type to match your mixed precision (automatically set based on deepspeed config)
941
+ if accelerator.mixed_precision == "bf16":
942
+ data_type = torch.bfloat16
943
+ elif accelerator.mixed_precision == "fp16":
944
+ data_type = torch.float16
945
+ else:
946
+ data_type = torch.float32
947
+
948
+ print("distributed =",distributed, "num_devices =", num_devices, "local rank =", local_rank, "world size =", world_size, "data_type =", data_type)
949
+ print = accelerator.print # only print if local_rank=0
950
+
951
+
952
+ # ## Configurations
953
+
954
+ # In[49]:
955
+
956
+
957
+ # if running this interactively, can specify jupyter_args here for argparser to use
958
+ if utils.is_interactive():
959
+ model_name = 'testing_MST' # 'sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0'
960
+ print("model_name:", model_name)
961
+
962
+ # global_batch_size and batch_size should already be defined in the above cells
963
+ # other variables can be specified in the following string:
964
+ # jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name={model_name}"
965
+
966
+ jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \
967
+ --model_name={model_name} \
968
+ --no-multi_subject --subj=1 --batch_size={batch_size} \
969
+ --hidden_dim=1024 --clip_scale=1. \
970
+ --no-blurry_recon --blur_scale=.5 \
971
+ --no-use_prior --prior_scale=30 \
972
+ --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \
973
+ --ckpt_interval=999 --no-ckpt_saving --new_test \
974
+ --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep"
975
+ print(jupyter_args)
976
+ jupyter_args = jupyter_args.split()
977
+
978
+
979
+ # In[50]:
980
+
981
+
982
+ parser = argparse.ArgumentParser(description="Model Training Configuration")
983
+ parser.add_argument(
984
+ "--model_name", type=str, default="testing",
985
+ help="name of model, used for ckpt saving and wandb logging (if enabled)",
986
+ )
987
+ parser.add_argument(
988
+ "--data_path", type=str, default="/weka/proj-fmri/shared/natural-scenes-dataset",
989
+ help="Path to where NSD data is stored / where to download it to",
990
+ )
991
+ parser.add_argument(
992
+ "--subj",type=int, default=1, choices=[1,2,3,4,5,6,7,8],
993
+ help="Validate on which subject?",
994
+ )
995
+ parser.add_argument(
996
+ "--multisubject_ckpt", type=str, default=None,
997
+ help="Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.",
998
+ )
999
+ parser.add_argument(
1000
+ "--num_sessions", type=int, default=0,
1001
+ help="Number of training sessions to include (if multi_subject, this variable doesnt matter)",
1002
+ )
1003
+ parser.add_argument(
1004
+ "--use_prior",action=argparse.BooleanOptionalAction,default=False,
1005
+ help="whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)",
1006
+ )
1007
+ parser.add_argument(
1008
+ "--batch_size", type=int, default=32,
1009
+ help="Batch size can be increased by 10x if only training v2c and not diffusion diffuser",
1010
+ )
1011
+ parser.add_argument(
1012
+ "--wandb_log",action=argparse.BooleanOptionalAction,default=False,
1013
+ help="whether to log to wandb",
1014
+ )
1015
+ parser.add_argument(
1016
+ "--resume_from_ckpt",action=argparse.BooleanOptionalAction,default=False,
1017
+ help="if not using wandb and want to resume from a ckpt",
1018
+ )
1019
+ parser.add_argument(
1020
+ "--wandb_project",type=str,default="stability",
1021
+ help="wandb project name",
1022
+ )
1023
+ parser.add_argument(
1024
+ "--mixup_pct",type=float,default=.33,
1025
+ help="proportion of way through training when to switch from BiMixCo to SoftCLIP",
1026
+ )
1027
+ parser.add_argument(
1028
+ "--low_mem",action=argparse.BooleanOptionalAction,default=False,
1029
+ help="whether to preload images to cpu to speed things up but consume more memory",
1030
+ )
1031
+ parser.add_argument(
1032
+ "--blurry_recon",action=argparse.BooleanOptionalAction,default=True,
1033
+ help="whether to output blurry reconstructions",
1034
+ )
1035
+ parser.add_argument(
1036
+ "--blur_scale",type=float,default=.5,
1037
+ help="multiply loss from blurry recons by this number",
1038
+ )
1039
+ parser.add_argument(
1040
+ "--clip_scale",type=float,default=1.,
1041
+ help="multiply contrastive loss by this number",
1042
+ )
1043
+ parser.add_argument(
1044
+ "--prior_scale",type=float,default=30,
1045
+ help="multiply diffusion prior loss by this",
1046
+ )
1047
+ parser.add_argument(
1048
+ "--use_image_aug",action=argparse.BooleanOptionalAction,default=True,
1049
+ help="whether to use image augmentation",
1050
+ )
1051
+ parser.add_argument(
1052
+ "--num_epochs",type=int,default=120,
1053
+ help="number of epochs of training",
1054
+ )
1055
+ parser.add_argument(
1056
+ "--multi_subject",action=argparse.BooleanOptionalAction,default=False,
1057
+ )
1058
+ parser.add_argument(
1059
+ "--new_test",action=argparse.BooleanOptionalAction,default=True,
1060
+ )
1061
+ parser.add_argument(
1062
+ "--n_blocks",type=int,default=2,
1063
+ )
1064
+ parser.add_argument(
1065
+ "--hidden_dim",type=int,default=1024,
1066
+ )
1067
+ parser.add_argument(
1068
+ "--seq_past",type=int,default=0,
1069
+ )
1070
+ parser.add_argument(
1071
+ "--seq_future",type=int,default=0,
1072
+ )
1073
+ parser.add_argument(
1074
+ "--lr_scheduler_type",type=str,default='cycle',choices=['cycle','linear'],
1075
+ )
1076
+ parser.add_argument(
1077
+ "--ckpt_saving",action=argparse.BooleanOptionalAction,default=True,
1078
+ )
1079
+ parser.add_argument(
1080
+ "--ckpt_interval",type=int,default=5,
1081
+ help="save backup ckpt and reconstruct every x epochs",
1082
+ )
1083
+ parser.add_argument(
1084
+ "--seed",type=int,default=42,
1085
+ )
1086
+ parser.add_argument(
1087
+ "--max_lr",type=float,default=3e-4,
1088
+ )
1089
+
1090
+ if utils.is_interactive():
1091
+ args = parser.parse_args(jupyter_args)
1092
+ else:
1093
+ args = parser.parse_args()
1094
+
1095
+ # create global variables without the args prefix
1096
+ for attribute_name in vars(args).keys():
1097
+ globals()[attribute_name] = getattr(args, attribute_name)
1098
+
1099
+ outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')
1100
+ if not os.path.exists(outdir) and ckpt_saving:
1101
+ os.makedirs(outdir,exist_ok=True)
1102
+
1103
+ if use_image_aug or blurry_recon:
1104
+ import kornia
1105
+ import kornia.augmentation as K
1106
+ from kornia.augmentation.container import AugmentationSequential
1107
+ if use_image_aug:
1108
+ img_augment = AugmentationSequential(
1109
+ kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),
1110
+ same_on_batch=False,
1111
+ data_keys=["input"],
1112
+ )
1113
+ # Define the blurring augmentations
1114
+ blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)
1115
+
1116
+ if multi_subject:
1117
+ subj_list = np.arange(1,9)
1118
+ subj_list = subj_list[subj_list != subj]
1119
+ else:
1120
+ subj_list = [subj]
1121
+
1122
+ print("subj_list", subj_list, "num_sessions", num_sessions)
1123
+
1124
+
1125
+ # ## Prep data, models, and dataloaders
1126
+
1127
+ # In[51]:
1128
+
1129
+
1130
+ if ckpt_saving:
1131
+ # save MST_ID for 2-alternative forced-choice retrieval evaluation
1132
+ if 'MST' in model_name:
1133
+ eval_dir = os.environ["eval_dir"]
1134
+ print('saving MST info in', eval_dir)
1135
+ # Saving ##
1136
+ if not os.path.exists(eval_dir):
1137
+ os.mkdir(eval_dir)
1138
+
1139
+ np.save(f"{eval_dir}/MST_ID.npy", MST_ID)
1140
+ np.save(f"{eval_dir}/MST_pairmate_indices.npy", MST_pairmate_indices)
1141
+
1142
+ if remove_random_n:
1143
+ np.save(f"{eval_dir}/imgs_to_remove.npy", imgs_to_remove)
1144
+
1145
+ np.save(f"{eval_dir}/train_image_indices.npy", train_image_indices)
1146
+ np.save(f"{eval_dir}/test_image_indices.npy", test_image_indices)
1147
+ np.save(f"{eval_dir}/images.npy", images)
1148
+ np.save(f"{eval_dir}/vox.npy", vox)
1149
+
1150
+
1151
+ # ### Creating wds dataloader, preload betas and all 73k possible images
1152
+
1153
+ # In[52]:
1154
+
1155
+
1156
+ def my_split_by_node(urls): return urls
1157
+ num_voxels_list = []
1158
+
1159
+ if multi_subject:
1160
+ nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])
1161
+ num_samples_per_epoch = (750*40) // num_devices
1162
+ else:
1163
+ # num_samples_per_epoch = (750*num_sessions) // num_devices
1164
+ num_samples_per_epoch = len(train_image_indices)
1165
+
1166
+ print("dividing batch size by len(subj_list); per-subject batches are then concatenated across subjects during training...")
1167
+ batch_size = batch_size // len(subj_list)
1168
+
1169
+ num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))
1170
+
1171
+ print("batch_size =", batch_size, "num_iterations_per_epoch =",num_iterations_per_epoch, "num_samples_per_epoch =",num_samples_per_epoch)
1172
+
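+ # Worked example (hypothetical numbers): with 800 training trials, batch_size=8 and a single subject,
+ # num_iterations_per_epoch = 800 // (8 * 1) = 100.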
1173
+
1174
+ # In[53]:
1175
+
1176
+
1177
+ train_data = {}
1178
+ train_dl = {}
1179
+
1180
+ train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))
1181
+ test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))
1182
+
1183
+
1184
+ # In[54]:
1185
+
1186
+
1187
+ num_voxels = {}
1188
+ voxels = {}
1189
+ for s in subj_list:
1190
+ print(f"Training with {num_sessions} sessions")
1191
+ train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)
1192
+
1193
+ num_voxels_list.append(vox[0].shape[-1])
1194
+ num_voxels[f'subj0{s}'] = vox[0].shape[-1]
1195
+ voxels[f'subj0{s}'] = vox
1196
+ print(f"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}")
1197
+
1198
+ print("Loaded all subj train dls and vox!\n")
1199
+
1200
+ # Validate only on one subject
1201
+ if multi_subject:
1202
+     subj = subj_list[0] # can't validate on the actual held-out subject, so pick the first one in subj_list
1203
+ test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True)
1204
+
1205
+ print(f"Loaded test dl for subj{subj}!\n")
1206
+
1207
+
1208
+ # ## Load models
1209
+
1210
+ # ### CLIP image embeddings model
1211
+
1212
+ # In[55]:
1213
+
1214
+
1215
+ ## USING OpenCLIP ViT-bigG ###
1216
+ sys.path.append('generative_models/')
1217
+ import sgm
1218
+ from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder
1219
+ # from generative_models.sgm.models.diffusion import DiffusionEngine
1220
+ # from omegaconf import OmegaConf
1221
+
1222
+ try:
1223
+ print(clip_img_embedder)
1224
+ except:
1225
+ clip_img_embedder = FrozenOpenCLIPImageEmbedder(
1226
+ arch="ViT-bigG-14",
1227
+ version="laion2b_s39b_b160k",
1228
+ output_tokens=True,
1229
+ only_tokens=True,
1230
+ )
1231
+ clip_img_embedder.to(device)
1232
+ clip_seq_dim = 256
1233
+ clip_emb_dim = 1664
1234
+
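+ # (For ViT-bigG-14 at 224x224 input: 224 / 14 = 16, so the embedder returns 16 * 16 = 256 patch tokens,
+ # each of width 1664; hence clip_seq_dim=256 and clip_emb_dim=1664.)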
1235
+ # ## USING OPEN AI CLIP ViT-L ###
1236
+ # import clip
1237
+ # try:
1238
+ # print(clip_model)
1239
+ # except:
1240
+ # clip_model, preprocess = clip.load("ViT-L/14", device=device)
1241
+ # preprocess = transforms.Compose([
1242
+ # transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),
1243
+ # transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
1244
+ # std=[0.26862954, 0.26130258, 0.27577711]),
1245
+ # ])
1246
+ # def clip_img_embedder(image):
1247
+ # preproc_img = preprocess(image)
1248
+ # return clip_model.encode_image(preproc_img)
1249
+ # clip_seq_dim = 1
1250
+ # clip_emb_dim = 768
1251
+
1252
+
1253
+ # ### MindEye modules
1254
+
1255
+ # In[56]:
1256
+
1257
+
1258
+ model = utils.prepare_model_and_training(
1259
+ num_voxels_list=num_voxels_list,
1260
+ n_blocks=n_blocks,
1261
+ hidden_dim=hidden_dim,
1262
+ clip_emb_dim=clip_emb_dim,
1263
+ clip_seq_dim=clip_seq_dim,
1264
+ use_prior=use_prior,
1265
+ clip_scale=clip_scale
1266
+ )
1267
+
1268
+
1269
+ # In[57]:
1270
+
1271
+
1272
+ # test on subject 1 with fake data
1273
+ b = torch.randn((2,1,num_voxels_list[0]))
1274
+ print(b.shape, model.ridge(b,0).shape)
1275
+
1276
+
1277
+ # In[58]:
1278
+
1279
+
1280
+ # test that the model works on some fake data
1281
+ b = torch.randn((2,1,hidden_dim))
1282
+ print("b.shape",b.shape)
1283
+
1284
+ backbone_, clip_, blur_ = model.backbone(b)
1285
+ print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)
1286
+
1287
+
1288
+ # ### Adding diffusion prior + unCLIP if use_prior=True
1289
+
1290
+ # In[59]:
1291
+
1292
+
1293
+ if use_prior:
1294
+ from models import *
1295
+
1296
+ # setup diffusion prior network
1297
+ out_dim = clip_emb_dim
1298
+ depth = 6
1299
+ dim_head = 52
1300
+ heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim
1301
+ timesteps = 100
1302
+
1303
+ prior_network = VersatileDiffusionPriorNetwork(
1304
+ dim=out_dim,
1305
+ depth=depth,
1306
+ dim_head=dim_head,
1307
+ heads=heads,
1308
+ causal=False,
1309
+ num_tokens = clip_seq_dim,
1310
+ learned_query_mode="pos_emb"
1311
+ )
1312
+
1313
+ model.diffusion_prior = BrainDiffusionPrior(
1314
+ net=prior_network,
1315
+ image_embed_dim=out_dim,
1316
+ condition_on_text_encodings=False,
1317
+ timesteps=timesteps,
1318
+ cond_drop_prob=0.2,
1319
+ image_embed_scale=None,
1320
+ )
1321
+
1322
+ utils.count_params(model.diffusion_prior)
1323
+ utils.count_params(model)
1324
+
1325
+
1326
+ # ### Setup optimizer / lr / ckpt saving
1327
+
1328
+ # In[60]:
1329
+
1330
+
1331
+ no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
1332
+
1333
+ opt_grouped_parameters = [
1334
+ {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},
1335
+ {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1336
+ {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
1337
+ ]
1338
+ # model.backbone.requires_grad_(False)
1339
+
1340
+ if use_prior:
1341
+ opt_grouped_parameters.extend([
1342
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1343
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
1344
+ ])
1345
+
1346
+ optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)
1347
+
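+ # (The parameter groups above follow the usual AdamW convention: biases and LayerNorm parameters are
+ # excluded from weight decay, everything else uses weight_decay=1e-2.)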
1348
+ if lr_scheduler_type == 'linear':
1349
+ lr_scheduler = torch.optim.lr_scheduler.LinearLR(
1350
+ optimizer,
1351
+ total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),
1352
+ last_epoch=-1
1353
+ )
1354
+ elif lr_scheduler_type == 'cycle':
1355
+ if num_iterations_per_epoch==0:
1356
+ num_iterations_per_epoch=1
1357
+ total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))
1358
+ print("total_steps", total_steps)
1359
+ lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
1360
+ optimizer,
1361
+ max_lr=max_lr,
1362
+ total_steps=total_steps,
1363
+ final_div_factor=1000,
1364
+ last_epoch=-1, pct_start=2/num_epochs
1365
+ )
1366
+
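+ # (With pct_start=2/num_epochs, OneCycleLR ramps the learning rate up to max_lr over roughly the first
+ # two epochs and then anneals it back down for the rest of training.)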
1367
+ def save_ckpt(tag):
1368
+ ckpt_path = outdir+f'/{tag}.pth'
1369
+ if accelerator.is_main_process:
1370
+ unwrapped_model = accelerator.unwrap_model(model)
1371
+ torch.save({
1372
+ 'epoch': epoch,
1373
+ 'model_state_dict': unwrapped_model.state_dict(),
1374
+ 'optimizer_state_dict': optimizer.state_dict(),
1375
+ 'lr_scheduler': lr_scheduler.state_dict(),
1376
+ 'train_losses': losses,
1377
+ 'test_losses': test_losses,
1378
+ 'lrs': lrs,
1379
+ }, ckpt_path)
1380
+ print(f"\n---saved {outdir}/{tag} ckpt!---\n")
1381
+
1382
+ def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False):
1383
+ print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
1384
+     checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')
1385
+ state_dict = checkpoint['model_state_dict']
1386
+ if multisubj_loading: # remove incompatible ridge layer that will otherwise error
1387
+ state_dict.pop('ridge.linears.0.weight',None)
1388
+ model.load_state_dict(state_dict, strict=strict)
1389
+ if load_epoch:
1390
+ globals()["epoch"] = checkpoint['epoch']
1391
+ print("Epoch",epoch)
1392
+ if load_optimizer:
1393
+ optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
1394
+ if load_lr:
1395
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
1396
+ del checkpoint
1397
+
1398
+ print("\nDone with model preparations!")
1399
+ num_params = utils.count_params(model)
1400
+
1401
+
1402
+ # # Wandb
1403
+
1404
+ # In[61]:
1405
+
1406
+
1407
+ if local_rank==0 and wandb_log: # only use main process for wandb logging
1408
+ import wandb
1409
+ import time
1410
+
1411
+ wandb_project = 'rtmindeye'
1412
+ print(f"wandb {wandb_project} run {model_name}")
1413
+
1414
+ # Need to configure wandb beforehand in terminal with "wandb init"!
1415
+ wandb_config = {
1416
+ "model_name": model_name,
1417
+ "global_batch_size": global_batch_size,
1418
+ "batch_size": batch_size,
1419
+ "num_epochs": num_epochs,
1420
+ "num_sessions": num_sessions,
1421
+ "num_params": num_params,
1422
+ "clip_scale": clip_scale,
1423
+ "prior_scale": prior_scale,
1424
+ "blur_scale": blur_scale,
1425
+ "use_image_aug": use_image_aug,
1426
+ "max_lr": max_lr,
1427
+ "mixup_pct": mixup_pct,
1428
+ "num_samples_per_epoch": num_samples_per_epoch,
1429
+ "ckpt_interval": ckpt_interval,
1430
+ "ckpt_saving": ckpt_saving,
1431
+ "seed": seed, # SLURM array task ID
1432
+ "distributed": distributed,
1433
+ "num_devices": num_devices,
1434
+ "world_size": world_size,
1435
+ }
1436
+ print("wandb_config:\n", wandb_config)
1437
+ print("wandb_id:", model_name)
1438
+
1439
+ # Initialize wandb
1440
+ wandb.init(
1441
+ id=model_name,
1442
+ project=wandb_project,
1443
+ name=model_name,
1444
+ config=wandb_config,
1445
+ resume="allow",
1446
+ save_code=True,
1447
+ )
1448
+
1449
+ # Get SLURM job & array ID
1450
+ slurm_job_id = utils.get_slurm_job()
1451
+ slurm_array_id = seed # seed corresponds to SLURM_ARRAY_TASK_ID
1452
+
1453
+ # Define SLURM log paths
1454
+ log_dir = "slurms"
1455
+ log_files = [
1456
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.out",
1457
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.err",
1458
+ ]
1459
+
1460
+ # Ensure logs exist before logging them
1461
+ for log_file in log_files:
1462
+ wait_time = 0
1463
+ while not os.path.exists(log_file) and wait_time < 60: # Wait max 60s
1464
+ time.sleep(5)
1465
+ wait_time += 5
1466
+
1467
+ # Log SLURM logs as artifacts
1468
+ artifact = wandb.Artifact(f"slurm_logs_{slurm_job_id}_{slurm_array_id}", type="logs")
1469
+ for log_file in log_files:
1470
+ if os.path.exists(log_file):
1471
+ artifact.add_file(log_file)
1472
+
1473
+ wandb.log_artifact(artifact)
1474
+ else:
1475
+ wandb_log = False
1476
+
1477
+
1478
+ # # Train the model
1479
+
1480
+ # In[62]:
1481
+
1482
+
1483
+ epoch = 0
1484
+ losses, test_losses, lrs = [], [], []
1485
+ best_test_loss = 1e9
1486
+ torch.cuda.empty_cache()
1487
+
1488
+
1489
+ # In[63]:
1490
+
1491
+
1492
+ # load multisubject stage1 ckpt if set
1493
+ if multisubject_ckpt is not None and not resume_from_ckpt:
1494
+ load_ckpt("last",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)
1495
+
1496
+
1497
+ # In[64]:
1498
+
1499
+
1500
+ # checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')
1501
+ # state_dict = checkpoint['model_state_dict']
1502
+ # model.load_state_dict(state_dict, strict=False)
1503
+
1504
+
1505
+ # In[65]:
1506
+
1507
+
1508
+ # train_dls = [train_dl[f'subj0{s}'] for s in subj_list]
1509
+
1510
+ model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)
1511
+ # leaving out test_dl since we will only have local_rank 0 device do evals
1512
+
1513
+
1514
+ # In[ ]:
1515
+
1516
+
1517
+ print(f"{model_name} starting with epoch {epoch} / {num_epochs}")
1518
+ progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0))
1519
+ test_image, test_voxel = None, None
1520
+ mse = nn.MSELoss()
1521
+ l1 = nn.L1Loss()
1522
+ soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs))
1523
+ skip_train = epoch >= (num_epochs-1) # skip training if resuming from a fully trained model
1524
+
1525
+ for epoch in progress_bar:
1526
+ model.train()
1527
+
1528
+ fwd_percent_correct = 0.
1529
+ bwd_percent_correct = 0.
1530
+ test_fwd_percent_correct = 0.
1531
+ test_bwd_percent_correct = 0.
1532
+
1533
+ recon_cossim = 0.
1534
+ test_recon_cossim = 0.
1535
+ recon_mse = 0.
1536
+ test_recon_mse = 0.
1537
+
1538
+ loss_clip_total = 0.
1539
+ loss_blurry_total = 0.
1540
+ loss_blurry_cont_total = 0.
1541
+ test_loss_clip_total = 0.
1542
+
1543
+ loss_prior_total = 0.
1544
+ test_loss_prior_total = 0.
1545
+
1546
+ blurry_pixcorr = 0.
1547
+ test_blurry_pixcorr = 0.
1548
+
1549
+ # you now have voxel_iters and image_iters with num_iterations_per_epoch batches each
1550
+ for train_i, behav in enumerate(train_dl):
1551
+ with torch.cuda.amp.autocast(dtype=data_type):
1552
+ optimizer.zero_grad()
1553
+ loss = 0.
1554
+
1555
+ behav = behav[0]
1556
+
1557
+ image = images[behav.long().cpu()].to(device)
1558
+ voxel = vox[behav.long().cpu()]
1559
+ # voxel = (voxel - train_mean) / train_std
1560
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1561
+
1562
+ if use_image_aug:
1563
+ image = img_augment(image)
1564
+
1565
+ clip_target = clip_img_embedder(image)
1566
+ assert not torch.any(torch.isnan(clip_target))
1567
+
1568
+ if epoch < int(mixup_pct * num_epochs):
1569
+ voxel, perm, betas, select = utils.mixco(voxel)
1570
+
1571
+ voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)]
1572
+ # voxel_ridge = torch.cat(voxel_ridge_list, dim=0)
1573
+
1574
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1575
+
1576
+ if clip_scale>0:
1577
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1578
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1579
+
1580
+ if use_prior:
1581
+ loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target)
1582
+ loss_prior_total += loss_prior.item()
1583
+ loss_prior *= prior_scale
1584
+ loss += loss_prior
1585
+
1586
+ recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item()
1587
+ recon_mse += mse(prior_out, clip_target).item()
1588
+
1589
+ if clip_scale>0:
1590
+ if epoch < int(mixup_pct * num_epochs):
1591
+ loss_clip = utils.mixco_nce(
1592
+ clip_voxels_norm,
1593
+ clip_target_norm,
1594
+ temp=.006,
1595
+ perm=perm, betas=betas, select=select)
1596
+ else:
1597
+ epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)]
1598
+ loss_clip = utils.soft_clip_loss(
1599
+ clip_voxels_norm,
1600
+ clip_target_norm,
1601
+ temp=epoch_temp)
1602
+
1603
+ loss_clip_total += loss_clip.item()
1604
+ loss_clip *= clip_scale
1605
+ loss += loss_clip
1606
+
1607
+ if blurry_recon:
1608
+ image_enc_pred, transformer_feats = blurry_image_enc_
1609
+
1610
+ image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215
1611
+ loss_blurry = l1(image_enc_pred, image_enc)
1612
+ loss_blurry_total += loss_blurry.item()
1613
+
1614
+ if epoch < int(mixup_pct * num_epochs):
1615
+ image_enc_shuf = image_enc[perm]
1616
+ betas_shape = [-1] + [1]*(len(image_enc.shape)-1)
1617
+ image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \
1618
+ image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)
1619
+
1620
+ image_norm = (image - mean)/std
1621
+ image_aug = (blur_augs(image) - mean)/std
1622
+ _, cnx_embeds = cnx(image_norm)
1623
+ _, cnx_aug_embeds = cnx(image_aug)
1624
+
1625
+ cont_loss = utils.soft_cont_loss(
1626
+ nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),
1627
+ nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1628
+ nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1629
+ temp=0.2)
1630
+ loss_blurry_cont_total += cont_loss.item()
1631
+
1632
+ loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215
1633
+
1634
+ if clip_scale>0:
1635
+ # forward and backward top 1 accuracy
1636
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1637
+ fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1638
+ bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1639
+
1640
+ if blurry_recon:
1641
+ with torch.no_grad():
1642
+ # only doing pixcorr eval on a subset of the samples per batch because it's costly & slow to compute autoenc.decode()
1643
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)
1644
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1)
1645
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1646
+ blurry_pixcorr += pixcorr.item()
1647
+
1648
+ utils.check_loss(loss)
1649
+ accelerator.backward(loss)
1650
+ optimizer.step()
1651
+
1652
+ losses.append(loss.item())
1653
+ lrs.append(optimizer.param_groups[0]['lr'])
1654
+
1655
+ if lr_scheduler_type is not None:
1656
+ lr_scheduler.step()
1657
+
1658
+ if train_i >= num_iterations_per_epoch-1:
1659
+ break
1660
+
1661
+ model.eval()
1662
+ logs = {}
1663
+
1664
+ if local_rank == 0:
1665
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1666
+ for i in range(1):
1667
+ for j in range(2):
1668
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1669
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1670
+ subset_dl = torch.utils.data.DataLoader(
1671
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1672
+ drop_last=False, pin_memory=True
1673
+ )
1674
+
1675
+ # Reset metrics for this subset
1676
+ test_losses = []
1677
+ test_loss_clip_total = 0
1678
+ test_loss_prior_total = 0
1679
+ test_blurry_pixcorr = 0
1680
+ test_fwd_percent_correct = 0
1681
+ test_bwd_percent_correct = 0
1682
+ test_recon_cossim = 0
1683
+ test_recon_mse = 0
1684
+
1685
+ for test_i, behav in enumerate(subset_dl):
1686
+ behav = behav[0]
1687
+ loss = 0.
1688
+
1689
+ if behav.ndim > 1:
1690
+ image = images[behav[:, 0].long().cpu()].to(device)
1691
+ voxel = vox[behav.long().cpu()].mean(1)
1692
+ else:
1693
+ image = images[behav.long().cpu()].to(device)
1694
+ voxel = vox[behav.long().cpu()]
1695
+
1696
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1697
+
1698
+ clip_img_embedder = clip_img_embedder.to(device)
1699
+ clip_target = clip_img_embedder(image.float())
1700
+
1701
+ voxel_ridge = model.ridge(voxel, 0)
1702
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1703
+
1704
+ if clip_scale > 0:
1705
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1706
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1707
+
1708
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image) // 5, replace=False)
1709
+
1710
+ if use_prior:
1711
+ loss_prior, contaminated_prior_out = model.diffusion_prior(
1712
+ text_embed=backbone[random_samps], image_embed=clip_target[random_samps])
1713
+ test_loss_prior_total += loss_prior.item()
1714
+ loss_prior *= prior_scale
1715
+ loss += loss_prior
1716
+
1717
+ if clip_scale > 0:
1718
+ loss_clip = utils.soft_clip_loss(
1719
+ clip_voxels_norm,
1720
+ clip_target_norm,
1721
+ temp=0.006
1722
+ )
1723
+ test_loss_clip_total += loss_clip.item()
1724
+ loss_clip *= clip_scale
1725
+ loss += loss_clip
1726
+
1727
+ if blurry_recon:
1728
+ image_enc_pred, _ = blurry_image_enc_
1729
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps] / 0.18215).sample / 2 + 0.5).clamp(0, 1)
1730
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1731
+ test_blurry_pixcorr += pixcorr.item()
1732
+
1733
+ if clip_scale > 0:
1734
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1735
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1736
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1737
+
1738
+ utils.check_loss(loss)
1739
+ test_losses.append(loss.item())
1740
+
1741
+ logs.update({
1742
+ f"subset_{i}_{j}_test/loss": np.mean(test_losses),
1743
+ f"subset_{i}_{j}_test/loss_clip_total": test_loss_clip_total / (test_i + 1),
1744
+ f"subset_{i}_{j}_test/loss_prior": test_loss_prior_total / (test_i + 1),
1745
+ f"subset_{i}_{j}_test/blurry_pixcorr": test_blurry_pixcorr / (test_i + 1),
1746
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1747
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1748
+ })
1749
+ print(f"--- Subset ({i},{j}) ---")
1750
+ for k, v in logs.items():
1751
+ if f"subset_{i}_{j}" in k:
1752
+ print(f"{k}: {v:.4f}")
1753
+
1754
+ # After subset loop: add train (and global test, if you want) metrics
1755
+ logs.update({
1756
+ "train/loss": np.mean(losses[-(train_i+1):]),
1757
+ "train/lr": lrs[-1],
1758
+ "train/num_steps": len(losses),
1759
+ "train/fwd_pct_correct": fwd_percent_correct / (train_i + 1),
1760
+ "train/bwd_pct_correct": bwd_percent_correct / (train_i + 1),
1761
+ "train/loss_clip_total": loss_clip_total / (train_i + 1),
1762
+ "train/loss_blurry_total": loss_blurry_total / (train_i + 1),
1763
+ "train/loss_blurry_cont_total": loss_blurry_cont_total / (train_i + 1),
1764
+ "train/blurry_pixcorr": blurry_pixcorr / (train_i + 1),
1765
+ "train/recon_cossim": recon_cossim / (train_i + 1),
1766
+ "train/recon_mse": recon_mse / (train_i + 1),
1767
+ "train/loss_prior": loss_prior_total / (train_i + 1),
1768
+ })
1769
+
1770
+
1771
+ # if finished training, save jpg recons if they exist
1772
+ if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0):
1773
+ if blurry_recon:
1774
+ image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215
1775
+ # transform blurry recon latents to images and plot it
1776
+ fig, axes = plt.subplots(1, 8, figsize=(10, 4))
1777
+ jj=-1
1778
+ for j in [0,1,2,3]:
1779
+ jj+=1
1780
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1781
+ axes[jj].axis('off')
1782
+ jj+=1
1783
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1784
+ axes[jj].axis('off')
1785
+ plt.show()
1786
+
1787
+ progress_bar.set_postfix(**logs)
1788
+
1789
+ if wandb_log: wandb.log(logs)
1790
+
1791
+ # Save model checkpoint and reconstruct
1792
+ if (ckpt_saving) and (epoch % ckpt_interval == 0):
1793
+ save_ckpt('last')
1794
+
1795
+ # wait for other GPUs to catch up if needed
1796
+ accelerator.wait_for_everyone()
1797
+ torch.cuda.empty_cache()
1798
+
1799
+ print("\n===Finished!===\n")
1800
+ if ckpt_saving:
1801
+ save_ckpt('last')
1802
+
1803
+
1804
+ # In[68]:
1805
+
1806
+
1807
+ len(test_data)
1808
+
1809
+
1810
+ # In[69]:
1811
+
1812
+
1813
+ # # Track metrics here:
1814
+ # https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999
1815
+
1816
+
1817
+ # **To tell whether the model is working, I look at test_bwd/fwd_pct_correct and check that it is doing better than chance (1/batch_size).**
1818
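+ # Minimal chance-level check (added for illustration; not part of the original pipeline).
+ # With a retrieval batch containing every MST item, random guessing gives top-1 accuracy of
+ # 1/batch_size, so the fwd/bwd percent-correct values logged above should sit well above it.
+ chance_level = 1 / len(MST_idx)  # the test DataLoader above uses batch_size=len(MST_idx)
+ print(f"chance level: {chance_level:.3f}")
+ print(f"fwd acc (subset 0,0): {logs.get('subset_0_0_test/fwd_pct_correct', float('nan')):.3f}")
+ print(f"bwd acc (subset 0,0): {logs.get('subset_0_0_test/bwd_pct_correct', float('nan')):.3f}")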
+
1819
+ # In[70]:
1820
+
1821
+
1822
+ # MST_pairmate_names
1823
+
1824
+
1825
+ # In[71]:
1826
+
1827
+
1828
+ x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')]
1829
+ assert len(image_idx) == len(x)
1830
+ pairs = np.empty(shape=MST_pairmate_names.shape, dtype=int)
1831
+ for i, p in enumerate(MST_pairmate_names):
1832
+ assert p[0] != p[1] # no duplicate images
1833
+ pairs[i,0] = x.index(p[0])
1834
+ pairs[i,1] = x.index(p[1])
1835
+
1836
+ # print(pairs)
1837
+
1838
+
1839
+ # In[72]:
1840
+
1841
+
1842
+ # if sub=="sub-002":
1843
+ # unique_images_pairs = [
1844
+ # (2,3),(4,5),(7,8),(15,16),
1845
+ # (483, 484), (485, 486), (487, 488), (491, 492), (495, 496), (499, 500), (501, 502),
1846
+ # (503, 504), (512, 513),
1847
+ # ]
1848
+ # elif sub != 'sub-001' and session != 'ses-05':
1849
+ # unique_images_pairs = [
1850
+ # (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),
1851
+ # (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),
1852
+ # (31,32),(33,34),(35,36),
1853
+ # (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),
1854
+ # (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),
1855
+ # (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),
1856
+ # (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),
1857
+ # (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),
1858
+ # (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),
1859
+ # (847, 848), (849, 850)
1860
+ # ]
1861
+ # else:
1862
+ # # unique_images = unique_images[unique_images!='blank.jpg'][:50]
1863
+ # unique_images_pairs = find_mst_pairs(x)
1864
+ # # unique_images[unique_images_pairs]
1865
+
1866
+
1867
+ # In[73]:
1868
+
1869
+
1870
+ def evaluate_mst_pairs(mst_pairs):
1871
+ score = 0
1872
+ total = 0
1873
+
1874
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1875
+ for pair in mst_pairs:
1876
+ voxel = vox[image_idx[pair[0]]].to(device)[None]
1877
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1878
+
1879
+ imageA = images[image_idx[pair[0]]].to(device)[None]
1880
+ imageB = images[image_idx[pair[1]]].to(device)[None]
1881
+
1882
+ clip_targetA = clip_img_embedder(imageA.float())
1883
+ clip_targetB = clip_img_embedder(imageB.float())
1884
+
1885
+ voxel_ridge = model.ridge(voxel,0)
1886
+ backbone, clip_voxels, _ = model.backbone(voxel_ridge)
1887
+
1888
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1889
+ clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
1890
+ clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
1891
+
1892
+ if utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm) > utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm):
1893
+ score += 1
1894
+ total += 1
1895
+
1896
+ voxel = vox[image_idx[pair[1]]].to(device)[None]
1897
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1898
+
1899
+ voxel_ridge = model.ridge(voxel,0)
1900
+ backbone, clip_voxels, _ = model.backbone(voxel_ridge)
1901
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1902
+
1903
+ if utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm) > utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm):
1904
+ score += 1
1905
+ total += 1
1906
+
1907
+ return score/total
1908
+
1909
+ print(evaluate_mst_pairs(pairs))
1910
+
1911
+
1912
+ # In[74]:
1913
+
1914
+
1915
+ model.eval()
1916
+ logs = {}
1917
+ if local_rank == 0:
1918
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1919
+ for i in range(1):
1920
+ for j in range(2):
1921
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1922
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1923
+ subset_dl = torch.utils.data.DataLoader(
1924
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1925
+ drop_last=True, pin_memory=True
1926
+ )
1927
+
1928
+ # Reset metrics for this subset
1929
+ test_fwd_percent_correct = 0
1930
+ test_bwd_percent_correct = 0
1931
+
1932
+ for test_i, behav in enumerate(subset_dl):
1933
+ behav = behav[0]
1934
+ loss = 0.
1935
+ image = images[behav.long().cpu()].to(device)
1936
+ voxel = vox[behav.long().cpu()]
1937
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1938
+ clip_img_embedder = clip_img_embedder.to(device)
1939
+ clip_target = clip_img_embedder(image.float())
1940
+
1941
+ voxel_ridge = model.ridge(voxel, 0)
1942
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1943
+
1944
+ clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1)
1945
+ clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1)
1946
+
1947
+ if clip_scale > 0:
1948
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1949
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1950
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1951
+ print(test_fwd_percent_correct)
1952
+ print(test_bwd_percent_correct)
1953
+ logs.update({
1954
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1955
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1956
+ })
1957
+
1958
+ print("--- Full Dataset Evaluation ---")
1959
+ for k, v in logs.items():
1960
+ print(f"{k}: {v:.4f}")
1961
+
1962
+
1963
+ # In[ ]:
1964
+
1965
+
1966
+ top_k = 5
1967
+
1968
+ for x in range(len(MST_idx)):
1969
+ # Get top-k indices
1970
+ y = torch.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm)[x], k=top_k).indices.to('cpu').tolist()
1971
+
1972
+ # Set up the plot with original + top_k images in one row
1973
+ fig, axs = plt.subplots(1, top_k + 1, figsize=(3 * (top_k + 1), 3))
1974
+
1975
+ # Plot the original image
1976
+ orig_img = utils.torch_to_Image(images[MST_idx[x]])
1977
+ axs[0].imshow(orig_img)
1978
+ axs[0].set_title("Original")
1979
+ axs[0].axis("off")
1980
+
1981
+ # Plot the top-k retrieved images
1982
+ for idx, i in enumerate(y):
1983
+ pred_img = utils.torch_to_Image(images[MST_idx[i]])
1984
+ axs[idx + 1].imshow(pred_img)
1985
+ axs[idx + 1].set_title(f"Top {idx+1}")
1986
+ axs[idx + 1].axis("off")
1987
+
1988
+ plt.tight_layout()
1989
+ plt.show()
1990
+
1991
+
1992
+ # In[76]:
1993
+
1994
+
1995
+ def evaluate_mst_pairs(mst_pairs):
1996
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1997
+ failed_A = []
1998
+ failed_B = []
1999
+ failed_non_corr = []
2000
+
2001
+ # Get all unique image indices
2002
+ all_indices = np.unique(mst_pairs.flatten())
2003
+
2004
+ # Pre-load all images and betas to device
2005
+ all_images = images[image_idx[all_indices]].to(device)
2006
+ all_voxels = torch.Tensor(vox[image_idx[all_indices]]).unsqueeze(1).to(device)
2007
+
2008
+ # Get CLIP embeddings for all images
2009
+ all_clip_targets = clip_img_embedder(all_images.float())
2010
+ all_clip_targets_norm = nn.functional.normalize(all_clip_targets.flatten(1), dim=-1)
2011
+
2012
+ # Pass all betas through model to get MindEye embeddings
2013
+ all_voxel_ridge = model.ridge(all_voxels, 0)
2014
+ _, all_clip_voxels, _ = model.backbone(all_voxel_ridge)
2015
+ all_clip_voxels_norm = nn.functional.normalize(all_clip_voxels.flatten(1), dim=-1)
2016
+
2017
+ # Dict mapping idx (which indexes the "vox" and "images" tensors) to pos (their position in the flattened array "all_indices")
2018
+ idx_to_pos = {idx: pos for pos, idx in enumerate(all_indices)}
2019
+
2020
+ # Initialize scores
2021
+ corr_score = 0
2022
+ non_corr_score = 0
2023
+ corr_total = len(mst_pairs) * 2
2024
+ non_corr_total = len(mst_pairs) * (len(mst_pairs)-1) * 4 # each NxN similarity matrix has n*(n-1) off-diagonal elements, and we score four such matrices (AA, AB, BB, BA)
2025
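+ # Worked example of the two totals above, using the 31 MST pairs this session provides
+ # (mst_pairs has shape (31, 2) below):
+ #   corr_total     = 31 * 2      = 62    corresponding comparisons (one per pairmate A and B)
+ #   non_corr_total = 31 * 30 * 4 = 3720  non-corresponding comparisons: n*(n-1) off-diagonal
+ #                                        cells in each of the four matrices (AA, AB, BB, BA)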
+
2026
+
2027
+ # Pre-load voxelwise beta-based embeddings from MindEye and CLIP image embeddings
2028
+ idxA = np.array([pair[0] for pair in mst_pairs])
2029
+ idxB = np.array([pair[1] for pair in mst_pairs])
2030
+
2031
+ posA = np.array([idx_to_pos[idx] for idx in idxA])
2032
+ posB = np.array([idx_to_pos[idx] for idx in idxB])
2033
+
2034
+ voxA_embeddings = all_clip_voxels_norm[posA]
2035
+ voxB_embeddings = all_clip_voxels_norm[posB]
2036
+ imgA_embeddings = all_clip_targets_norm[posA]
2037
+ imgB_embeddings = all_clip_targets_norm[posB]
2038
+
2039
+ simA_A = utils.batchwise_cosine_similarity(voxA_embeddings, imgA_embeddings)
2040
+ simA_B = utils.batchwise_cosine_similarity(voxA_embeddings, imgB_embeddings)
2041
+ simB_B = utils.batchwise_cosine_similarity(voxB_embeddings, imgB_embeddings)
2042
+ simB_A = utils.batchwise_cosine_similarity(voxB_embeddings, imgA_embeddings)
2043
+
2044
+
2045
+ # corresponding 2-AFC
2046
+ # is the voxel embedding for image 1 pairmate A more similar to the CLIP embedding for image 1 pairmate A or the CLIP embedding for image 1 pairmate B?
2047
+ correct_A = torch.diag(simA_A) > torch.diag(simA_B)
2048
+ # is the voxel embedding for image 1 pairmate B more similar to the CLIP embedding for image 1 pairmate B or the CLIP embedding for image 1 pairmate A?
2049
+ correct_B = torch.diag(simB_B) > torch.diag(simB_A)
2050
+
2051
+ corr_score += correct_A.sum().item()
2052
+ corr_score += correct_B.sum().item()
2053
+
2054
+ # Store indices where AFC fails
2055
+ failed_A = [i for i, correct in enumerate(correct_A.cpu()) if not correct]
2056
+ failed_B = [i for i, correct in enumerate(correct_B.cpu()) if not correct]
2057
+
2058
+ # non-corresponding 2-AFC
2059
+ N = len(mst_pairs)
2060
+ # Create a mask that is True for all off-diagonal elements
2061
+ row_idx = torch.arange(N).unsqueeze(1) # (N, 1)
2062
+ col_idx = torch.arange(N).unsqueeze(0) # (1, N)
2063
+ off_diag_mask = row_idx != col_idx # shape (N, N)
2064
+
2065
+ diagA_A = simA_A.diag().unsqueeze(1).expand(-1, N) # Get diagonal values and expand to (N, N) by duplicating the diagonal element along the rows (since each row is the cosine similarity between a single voxel embedding and all CLIP embeddings)
2066
+ diagB_B = simB_B.diag().unsqueeze(1).expand(-1, N)
2067
+
2068
+ # pdb.set_trace()
2069
+
2070
+ # Compare each element in the row to the diagonal element
2071
+ off_diag_mask_device = off_diag_mask.to(device)
2072
+
2073
+ fail_AA = (simA_A < diagA_A) & off_diag_mask_device
2074
+ fail_AB = (simA_B < diagA_A) & off_diag_mask_device
2075
+ fail_BB = (simB_B < diagB_B) & off_diag_mask_device
2076
+ fail_BA = (simB_A < diagB_B) & off_diag_mask_device
2077
+
2078
+ non_corr_score += fail_AA.sum().item()
2079
+ non_corr_score += fail_AB.sum().item()
2080
+ non_corr_score += fail_BB.sum().item()
2081
+ non_corr_score += fail_BA.sum().item()
2082
+
2083
+ # Log failed indices
2084
+ fail_sources = [fail_AA, fail_AB, fail_BB, fail_BA]
2085
+ for fail_matrix, label in zip(fail_sources, ["AA", "AB", "BB", "BA"]):
2086
+ fail_coords = torch.nonzero(fail_matrix, as_tuple=False).cpu().numpy()
2087
+ for i, j in fail_coords:
2088
+ failed_non_corr.append({"type": label, "i": i, "j": j, "pair_i": mst_pairs[i], "pair_j": mst_pairs[j]})
2089
+
2090
+ return corr_score, corr_total, int(non_corr_score), non_corr_total, failed_A, failed_B, failed_non_corr
2091
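+ # evaluate_mst_pairs returns, in order: corresponding 2-AFC hits and total, non-corresponding
+ # 2-AFC hits and total, the pair indices where the A and B corresponding comparisons failed,
+ # and a list of dicts describing every failed non-corresponding comparison.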
+
2092
+
2093
+ # In[77]:
2094
+
2095
+
2096
+ x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')]
2097
+ assert len(image_idx) == len(x)
2098
+ pairs = []
2099
+ for i, p in enumerate(MST_pairmate_names):
2100
+ assert p[0] != p[1] # no duplicate images
2101
+ pairs.append([utils.find_all_indices(x,p[0]), utils.find_all_indices(x,p[1])])
2102
+
2103
+ pairs = np.array(pairs)
2104
+ print(pairs.shape)
2105
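+ # pairs should come out with shape (n_MST_pairs, 2, n_repeats): for every MST pair, all
+ # presentation indices of pairmate A and of pairmate B (find_all_indices returns every occurrence).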
+
2106
+
2107
+ # In[78]:
2108
+
2109
+
2110
+ all_scores = []
2111
+ all_failures = []
2112
+
2113
+ for i in range(1):
2114
+ for j in range(2):
2115
+ mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) # shape (31, 2)
2116
+ corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs)
2117
+
2118
+ # Store scores and failure info together
2119
+ all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total))
2120
+ all_failures.append({
2121
+ "repeat_A": i,
2122
+ "repeat_B": j,
2123
+ "failed_A": failed_A,
2124
+ "failed_B": failed_B,
2125
+ "failed_non_corr": failed_non_corr,
2126
+ "mst_pairs": mst_pairs,
2127
+ })
2128
+
2129
+ # Print summary
2130
+ print(f"pairmate A repeat {i} vs pairmate B repeat {j}:")
2131
+ print(f"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})")
2132
+ print(f"2-AFC non-corresponding = {non_corr_score}/{non_corr_total} ({non_corr_score/non_corr_total:.2%})")
2133
+ print("")
2134
+
2135
+
2136
+ # In[79]:
2137
+
2138
+
2139
+ # # Compare first few pairs
2140
+ # for pair in pairs: # Checking first 2 pairs
2141
+ # print("Indices in mst_pairs:", pair)
2142
+ # print("Corresponding filenames:")
2143
+ # print(f"Image 1: {x[pair[0]]}")
2144
+ # print(f"Image 2: {x[pair[1]]}\n")
2145
+
2146
+
2147
+ # In[80]:
2148
+
2149
+
2150
+ # for i in range(len(pairs)):
2151
+ # fig, ax = plt.subplots(1, 2, figsize=(10,8))
2152
+
2153
+ # ax[0].imshow(images[pairs[i][0]].permute(1,2,0).numpy())
2154
+ # ax[0].set_title(f"Repeat 1")
2155
+
2156
+ # ax[1].imshow(images[pairs[i][1]].permute(1,2,0).numpy())
2157
+ # ax[1].set_title(f"Repeat 2")
2158
+
2159
+ # plt.setp(ax, xticks=[], yticks=[])
2160
+ # plt.tight_layout()
2161
+ # plt.show()
2162
+
2163
+
2164
+ # In[81]:
2165
+
2166
+
2167
+ # score = 0
2168
+ # total = 0
2169
+ # with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
2170
+ # for pair in unique_images_pairs:
2171
+ # imageA_idx, imageB_idx = pair
2172
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
2173
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
2174
+
2175
+ # voxel = vox[imageA_idx].to(device)[None]
2176
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
2177
+
2178
+ # imageA = images[imageA_idx].to(device)[None]
2179
+ # imageB = images[imageB_idx].to(device)[None]
2180
+
2181
+ # clip_targetA = clip_img_embedder(imageA.float())
2182
+ # clip_targetB = clip_img_embedder(imageB.float())
2183
+
2184
+ # voxel_ridge = model.ridge(voxel,0)
2185
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
2186
+
2187
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
2188
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
2189
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
2190
+
2191
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
2192
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
2193
+
2194
+ # if cossimA > cossimB:
2195
+ # score += 1
2196
+ # total += 1
2197
+
2198
+ # for pair in unique_images_pairs:
2199
+ # imageA_idx, imageB_idx = pair
2200
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
2201
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
2202
+
2203
+ # voxel = vox[imageB_idx].to(device)[None]
2204
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
2205
+
2206
+ # imageA = images[imageA_idx].to(device)[None]
2207
+ # imageB = images[imageB_idx].to(device)[None]
2208
+
2209
+ # clip_targetA = clip_img_embedder(imageA.float())
2210
+ # clip_targetB = clip_img_embedder(imageB.float())
2211
+
2212
+ # voxel_ridge = model.ridge(voxel,0)
2213
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
2214
+
2215
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
2216
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
2217
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
2218
+
2219
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
2220
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
2221
+
2222
+ # if cossimB > cossimA:
2223
+ # score += 1
2224
+ # total += 1
2225
+
2226
+ # print(score/total)
2227
+
2228
+
2229
+ # In[82]:
2230
+
2231
+
2232
+ #display(utils.torch_to_Image(imageA))
2233
+ #display(utils.torch_to_Image(imageB))
2234
+
2235
+
2236
+ # In[83]:
2237
+
2238
+
2239
+ # from scipy.stats import binomtest
2240
+
2241
+ # total_samples = len(np.array(unique_images_pairs).flatten())
2242
+ # assert total_samples == 100
2243
+
2244
+ # correct_predictions = int((score/total) * total_samples) # calculate the number of correct predictions
2245
+ # expected_accuracy = 0.5 # expected accuracy under the null hypothesis
2246
+
2247
+ # # Perform the binomial test
2248
+ # binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')
2249
+ # p_value = binom_stats.pvalue
2250
+
2251
+ # # Output the result
2252
+ # print(f"P-value: {p_value}")
2253
+ # if p_value < 0.05:
2254
+ # print("The decoder's accuracy is significantly better than chance.")
2255
+ # else:
2256
+ # print("The decoder's accuracy is not significantly better than chance.")
2257
+
2258
+
2259
+ # In[ ]:
2260
+
2261
+
2262
+
2263
+
2264
+
2265
+ # In[ ]:
2266
+
2267
+
2268
+
2269
+
main-multisession-sub-005_ses-03_union_mask.py ADDED
@@ -0,0 +1,1956 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Import packages & functions
5
+
6
+ # In[1]:
7
+
8
+
9
+ print("importing modules")
10
+ import os
11
+ import sys
12
+ import json
13
+ import argparse
14
+ import numpy as np
15
+ import time
16
+ import random
17
+ import string
18
+ import h5py
19
+ from tqdm import tqdm
20
+ import webdataset as wds
21
+ from PIL import Image
22
+ import pandas as pd
23
+ import nibabel as nib
24
+ import nilearn
25
+
26
+ import matplotlib.pyplot as plt
27
+ import torch
28
+ import torch.nn as nn
29
+ from torchvision import transforms
30
+
31
+ # tf32 data type is faster than standard float32
32
+ torch.backends.cuda.matmul.allow_tf32 = True
33
+
34
+ import utils
35
+ from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas
36
+
37
+ # imports utils from mindeye_preproc as "preproc"
38
+ import importlib.util
39
+ parent_utils_path = "/home/ri4541/mindeye_preproc/analysis/utils.py"
40
+ spec = importlib.util.spec_from_file_location("utils", parent_utils_path)
41
+ preproc = importlib.util.module_from_spec(spec)
42
+ parent_dir = os.path.dirname(parent_utils_path)
43
+ if parent_dir not in sys.path:
44
+ sys.path.append(parent_dir)
45
+ spec.loader.exec_module(preproc)
46
+
47
+ if utils.is_interactive():
48
+ from IPython.display import clear_output # function to clear print outputs in cell
49
+ get_ipython().run_line_magic('load_ext', 'autoreload')
50
+ # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions
51
+ get_ipython().run_line_magic('autoreload', '2')
52
+
53
+ seed = utils.get_slurm_seed()
54
+
55
+
56
+ # # Princeton data prep
57
+
58
+ # ## Load Data & Design
59
+
60
+ # In[2]:
61
+
62
+
63
+ if utils.is_interactive():
64
+ sub = "sub-005"
65
+ session = "ses-03"
66
+ task = 'C' # e.g. 'study', 'A', or 'C'; used to search for the functional run in BIDS format
67
+ func_task_name = 'C'
68
+ else:
69
+ sub = os.environ["sub"]
70
+ session = os.environ["session"]
71
+ task = os.environ["task"]
72
+ func_task_name = 'C'
73
+
74
+ if session == "all":
75
+ ses_list = ["ses-01", "ses-02"] # list of actual session IDs
76
+ design_ses_list = ["ses-01", "ses-02"] # list of session IDs to search for design matrix
77
+ else:
78
+ ses_list = [session]
79
+ design_ses_list = [session]
80
+
81
+ task_name = f"_task-{task}" if task != 'study' else ''
82
+ resample_voxel_size = False
83
+ resample_post_glmsingle = False # do you want to do voxel resampling here? if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal
84
+ load_from_resampled_file = False # do you want to load resampled data from file? if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data
85
+
86
+ train_test_split = 'MST' # 'MST', 'orig', 'unique'
87
+ remove_close_to_MST = False
88
+ remove_random_n = False
89
+
90
+ if remove_close_to_MST or remove_random_n:
91
+ assert remove_close_to_MST != remove_random_n # don't remove both sets of images
92
+
93
+ n_to_remove = 0
94
+ if remove_random_n:
95
+ assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set
96
+ n_to_remove = 150
97
+
98
+ if resample_voxel_size:
99
+ # voxel size was unchanged in glmsingle, want to perform resampling here
100
+ resampled_vox_size = 2.5
101
+ resample_method = "sinc" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html
102
+
103
+ # file name helper variables
104
+ vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore
105
+ resampled_suffix = f"resampled_{vox_dim_str}mm_{resample_method}"
106
+ mask_resampled_suffix = resampled_suffix
107
+ if resample_post_glmsingle:
108
+ resampled_suffix += '_postglmsingle'
109
+ else:
110
+ resampled_suffix += '_preglmsingle'
111
+
112
+
113
+ # In[3]:
114
+
115
+
116
+ session_label = preproc.get_session_label(ses_list)
117
+ print('session label:', session_label)
118
+ n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)
119
+
120
+
121
+ # In[4]:
122
+
123
+
124
+ if utils.is_interactive():
125
+ glmsingle_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}"
126
+ else:
127
+ glmsingle_path = os.environ["glmsingle_path"]
128
+
129
+ designdir = "/home/ri4541/real_time_mindEye2"
130
+ print(glmsingle_path)
131
+
132
+ if resample_voxel_size:
133
+ # option 1: we are using original (non-resampled) GLMsingle outputs and doing the resampling here
134
+ # option 2: doing resampling pre-GLMsingle and using those outputs; no resampling involved here
135
+ if resample_post_glmsingle:
136
+ # option 1
137
+ orig_glmsingle_path = glmsingle_path
138
+ glmsingle_path += f"_{resampled_suffix}"
139
+ print("resampled glmsingle path:", glmsingle_path)
140
+ if load_from_resampled_file:
141
+ # resampling is already done; load from file
142
+ assert os.path.exists(glmsingle_path) # the new directory must have been created if we reached here
143
+ else:
144
+ # don't load from file; do resampling here
145
+ os.makedirs(glmsingle_path,exist_ok=True)
146
+ else:
147
+ # option 2
148
+ glmsingle_path += f"_{resampled_suffix}"
149
+ print("glmsingle path:", glmsingle_path)
150
+
151
+ assert os.path.exists(glmsingle_path)
152
+ print("glmsingle path exists!")
153
+
154
+
155
+ # In[5]:
156
+
157
+
158
+ data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files(
159
+ sub=sub,
160
+ session=session,
161
+ func_task_name=task,
162
+ designdir=designdir,
163
+ design_ses_list=design_ses_list
164
+ )
165
+
166
+ if sub == 'sub-001':
167
+ if session == 'ses-01':
168
+ assert image_names[0] == 'images/image_686_seed_1.png'
169
+ elif session in ('ses-02', 'all'):
170
+ assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'
171
+ elif session == 'ses-03':
172
+ assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'
173
+ elif session == 'ses-04':
174
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
175
+ elif sub == 'sub-003':
176
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
177
+
178
+ unique_images = np.unique(image_names.astype(str))
179
+ unique_images = unique_images[(unique_images!="nan")]
180
+ len_unique_images = len(unique_images)
181
+ print("n_runs",n_runs)
182
+
183
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
184
+ assert len(unique_images) == 851
185
+
186
+ print(image_names[:4])
187
+ print(starts[:4])
188
+ print(is_new_run[:4])
189
+
190
+ if remove_random_n:
191
+ # want to remove 150 imgs
192
+ # 100 special515 imgs are repeated 3x (300 total)
193
+ # all other train imgs are only shown once (558 total)
194
+ # of the 150, want to sample proportionally since we're cutting all repeats for special515
195
+ # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total
196
+ np.random.seed(seed)
197
+ options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)
198
+ options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)
199
+ imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False)
200
+ imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))
201
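+ # sanity check on the arithmetic above: 99 once-shown images plus 17 unique special515 images
+ # (each shown 3x, i.e. 51 presentations) gives the 150 removed presentations
+ assert len(imgs_to_remove) == 99 + 17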
+
202
+ image_idx = np.array([]) # contains the unique index of each presented image
203
+ vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx
204
+ all_MST_images = dict()
205
+ for i, im in enumerate(image_names):
206
+ # skip if blank, nan
207
+ if im == "blank.jpg":
208
+ i+=1
209
+ continue
210
+ if str(im) == "nan":
211
+ i+=1
212
+ continue
213
+ vox_image_names = np.append(vox_image_names, im)
214
+ if remove_close_to_MST: # optionally skip close_to_MST images
215
+ if "closest_pairs" in im:
216
+ i+=1
217
+ continue
218
+ elif remove_random_n:
219
+ if im in imgs_to_remove:
220
+ i+=1
221
+ continue
222
+
223
+ image_idx_ = np.where(im==unique_images)[0].item()
224
+ image_idx = np.append(image_idx, image_idx_)
225
+
226
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles
227
+ import re
228
+ if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im) or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)):
229
+ # the regexp here looks for **_***.png, allowing 1-2 digits before the underscore and 1-3 digits after it
230
+ # print(im)
231
+ all_MST_images[i] = im
232
+ i+=1
233
+ elif 'MST' in im:
234
+ all_MST_images[i] = im
235
+ i+=1
236
+
237
+ image_idx = torch.Tensor(image_idx).long()
238
+ # for im in new_image_names[MST_images]:
239
+ # assert 'MST_pairs' in im
240
+ # assert len(all_MST_images) == 300
241
+
242
+ unique_MST_images = np.unique(list(all_MST_images.values()))
243
+
244
+ MST_ID = np.array([], dtype=int)
245
+ if remove_close_to_MST:
246
+ close_to_MST_idx = np.array([], dtype=int)
247
+ if remove_random_n:
248
+ random_n_idx = np.array([], dtype=int)
249
+
250
+ vox_idx = np.array([], dtype=int)
251
+ j=0 # counter used only when remove_random_n is set, to index vox based on the removed images; unused otherwise
252
+ for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure
253
+ # skip if blank, nan
254
+ if im == "blank.jpg":
255
+ i+=1
256
+ continue
257
+ if str(im) == "nan":
258
+ i+=1
259
+ continue
260
+ if remove_close_to_MST: # optionally skip close_to_MST images
261
+ if "closest_pairs" in im:
262
+ close_to_MST_idx = np.append(close_to_MST_idx, i)
263
+ i+=1
264
+ continue
265
+ if remove_random_n:
266
+ if im in imgs_to_remove:
267
+ vox_idx = np.append(vox_idx, j)
268
+ i+=1
269
+ j+=1
270
+ continue
271
+ j+=1
272
+ curr = np.where(im == unique_MST_images)
273
+ # print(curr)
274
+ if curr[0].size == 0:
275
+ MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later
276
+ else:
277
+ MST_ID = np.append(MST_ID, curr)
278
+
279
+ assert len(MST_ID) == len(image_idx)
280
+ # assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)
281
+ # MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor
282
+ print(MST_ID.shape)
283
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
284
+ assert len(all_MST_images) == 100
285
+
286
+
287
+ # ## Load images
288
+
289
+ # In[6]:
290
+
291
+
292
+ import imageio.v2 as imageio
293
+ resize_transform = transforms.Resize((224, 224))
294
+ MST_images = []
295
+ images = None
296
+ for im_name in tqdm(image_idx):
297
+ if sub == 'sub-001' and session == 'ses-01':
298
+ image_file = f"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}"
299
+ else:
300
+ image_file = f"{unique_images[im_name]}"
301
+ im = imageio.imread(image_file)
302
+ im = torch.Tensor(im / 255).permute(2,0,1)
303
+ im = resize_transform(im.unsqueeze(0))
304
+ if images is None:
305
+ images = im
306
+ else:
307
+ images = torch.vstack((images, im))
308
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
309
+ if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\d{1,2}_\d{1,3}\.png$', image_file)):
310
+ MST_images.append(True)
311
+ else:
312
+ MST_images.append(False)
313
+ else:
314
+ if ("MST_pairs" in image_file): # ("_seed_" not in unique_images[im_name]) and (unique_images[im_name] != "blank.jpg")
315
+ MST_images.append(True)
316
+ else:
317
+ MST_images.append(False)
318
+
319
+ print("images", images.shape)
320
+ MST_images = np.array(MST_images)
321
+ print("MST_images", len(MST_images))
322
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
323
+ assert len(MST_images[MST_images==True]) == 100
324
+ print("MST_images==True", len(MST_images[MST_images==True]))
325
+
326
+
327
+ # In[7]:
328
+
329
+
330
+ # want IDs of pairmates based on MST_images
331
+ # create "MST_pairmate_names"/"MST_pairmate_indices": (n_pairs, 2) arrays with the names/indices of each pair, based on MST_images == True
332
+
333
+ assert unique_MST_images.shape[0] % 2 == 0 # make sure it's divisible by 2
334
+ MST_pairmate_names = unique_MST_images.reshape(int(unique_MST_images.shape[0]/2),2)
335
+ # print(MST_pairmate_names)
336
+
337
+ MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int)
338
+ for p, pair in enumerate(MST_pairmate_names):
339
+ for i, im in enumerate(pair):
340
+ MST_pairmate_indices[p][i] = np.where(np.isin(list(all_MST_images.values()), im))[0][0] # just take the first repeated instance of an image
341
+
342
+ print(MST_pairmate_indices.shape, MST_pairmate_indices)
343
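+ # MST_pairmate_indices has shape (n_pairs, 2): for each MST pair, the index of the first
+ # presentation of pairmate A and of pairmate B.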
+
344
+
345
+ # In[8]:
346
+
347
+
348
+ if (sub == 'sub-001' and session in ('ses-02', 'ses-03', 'all')):
349
+ # MST_pairs contains the indices of repeats based on all_MST_images
350
+ # all_MST_images contains the indices of images from image_names
351
+ MST_pairs = utils.find_paired_indices(torch.tensor(MST_ID))
352
+ MST_pairs = np.array(sorted(MST_pairs[:-1], key=lambda x: x[0])) # we added a fake value as a placeholder so index out the last group of pairs
353
+
354
+ # assert images[MST_pairs]
355
+
356
+ fig, ax = plt.subplots(1, 3, figsize=(10,4))
357
+ fig.suptitle('Sample MST pairs')
358
+
359
+ ax[0].imshow(images[MST_pairs[-1][0]].permute(1,2,0).numpy())
360
+ ax[0].set_title(f"Trial 0")
361
+
362
+ ax[1].imshow(images[MST_pairs[-1][1]].permute(1,2,0).numpy())
363
+ ax[1].set_title(f"Trial 1")
364
+
365
+ ax[2].imshow(images[MST_pairs[-1][2]].permute(1,2,0).numpy())
366
+ ax[2].set_title(f"Trial 2")
367
+
368
+ plt.setp(ax, xticks=[], yticks=[])
369
+ plt.tight_layout()
370
+ plt.show()
371
+
372
+
373
+ # In[9]:
374
+
375
+
376
+ # pairs has the indices of all repeated images
377
+ pairs = utils.find_paired_indices(image_idx)
378
+ pairs = sorted(pairs, key=lambda x: x[0])
379
+
380
+ fig, axes = plt.subplots(1, 3, figsize=(6, 2)) # 1 row, 3 columns
381
+ for i, ax in enumerate(axes):
382
+ ax.imshow(images[i].permute(1, 2, 0).numpy())
383
+ ax.set_title(f"Trial {i}")
384
+ ax.axis("off") # Hide axes for better visualization
385
+
386
+ plt.tight_layout()
387
+ # output_path = os.path.join(output_dir, "trials_plot.png")
388
+ # plt.savefig(output_path, dpi=300) # Save figure
389
+ plt.show()
390
+
391
+
392
+ # In[10]:
393
+
394
+
395
+ p=0
396
+
397
+ # plot 2 repeats (anything in pairs should have 2 repeats, even if there's more)
398
+ fig, ax = plt.subplots(1, 2, figsize=(10,8))
399
+
400
+ ax[0].imshow(images[pairs[p][0]].permute(1,2,0).numpy())
401
+ ax[0].set_title(f"Repeat 1")
402
+
403
+ ax[1].imshow(images[pairs[p][1]].permute(1,2,0).numpy())
404
+ ax[1].set_title(f"Repeat 2")
405
+
406
+ plt.setp(ax, xticks=[], yticks=[])
407
+ plt.tight_layout()
408
+ plt.show()
409
+
410
+
411
+ # In[11]:
412
+
413
+
414
+ def get_image_pairs(sub, session, func_task_name, designdir):
415
+ """Loads design files and processes image pairs for a given session."""
416
+ _, _, _, _, image_names, unique_images, _ = preproc.load_design_files(
417
+ sub=sub,
418
+ session=session,
419
+ func_task_name=func_task_name,
420
+ designdir=designdir,
421
+ design_ses_list=[session] # Ensure it's a list
422
+ )
423
+ return utils.process_images(image_names, unique_images)
424
+
425
+
426
+ # In[12]:
427
+
428
+
429
+ from collections import defaultdict
430
+
431
+ all_dicts = []
432
+ for s_idx, s in enumerate(ses_list):
433
+ im, vo, _ = get_image_pairs(sub, s, func_task_name, designdir)
434
+ assert len(im) == len(vo)
435
+ all_dicts.append({k:v for k,v in enumerate(vo)})
436
+
437
+ # for the train set (ses-01-02 non-MST)
438
+ image_to_indices = defaultdict(lambda: [[] for _ in range(len(ses_list))])
439
+ for ses_idx, idx_to_name in enumerate(all_dicts):
440
+ for idx, name in idx_to_name.items():
441
+ image_to_indices[name][ses_idx].append(idx)
442
+
443
+ image_to_indices = dict(image_to_indices)
444
+
445
+ # for the test set (ses-03)
446
+ # test_image_to_indices = defaultdict(lambda: [[] for _ in range(len([ses_list[-1]]))])
447
+ # for ses_idx, idx_to_name in enumerate([all_dicts[-1]]):
448
+ # for idx, name in idx_to_name.items():
449
+ # test_image_to_indices[name][ses_idx].append(idx)
450
+
451
+ # test_image_to_indices = dict(test_image_to_indices)
452
+
453
+
454
+ # In[13]:
455
+
456
+
457
+ # train_pairs_list = []
458
+ # test_pairs_list = []
459
+
460
+ if sub == 'sub-005' and ses_list == ["ses-01", "ses-02"]:
461
+ for image, (ses0_indices, ses1_indices) in image_to_indices.items():
462
+ # Offset session 1 indices by 693
463
+ image_to_indices[image] = [ses0_indices, [i + 693 for i in ses1_indices]]
464
+
465
+ # # Combine all repeat indices (across both sessions)
466
+ # all_indices = ses0_indices + ses1_indices_offset
467
+
468
+ # # Only include if there are at least 2 repeats
469
+ # if len(all_indices) >= 2:
470
+ # train_pairs_list.append(all_indices)
471
+
472
+ # for i in test_image_to_indices.values():
473
+ # # print(i[0])
474
+ # # Only include if there are at least 2 repeats
475
+ # if len(i[0]) >= 2:
476
+ # test_pairs_list.append(i[0])
477
+
478
+ # train_test_pairs = [train_pairs_list, test_pairs_list]
479
+
480
+ # elif sub == 'sub-005' and ses_list == ["ses-01", "ses-03"]:
481
+ # pairs_list = []
482
+
483
+ # if len(ses_list) > 2:
484
+ # # Case 1: Aggregate results from multiple sessions (ses_list[:-1]), concatenating into a single list
485
+ # combined_pairs = sum([get_image_pairs(sub, s, func_task_name, designdir) for s in ses_list[:-1]], [])
486
+ # pairs_list.append(combined_pairs)
487
+
488
+ # # Case 2: Process last session separately
489
+ # pairs_list.append(get_image_pairs(sub, ses_list[-1], func_task_name, designdir))
490
+
491
+ # else:
492
+ # # Case 3: Process both sessions individually if ses_list has only 2 entries
493
+ # pairs_list.extend([get_image_pairs(sub, s, func_task_name, designdir) for s in ses_list])
494
+
495
+ # assert len(pairs_list) == 2
496
+
497
+
498
+ # In[14]:
499
+
500
+
501
+ if resample_voxel_size:
502
+ from nilearn.masking import apply_mask, unmask
503
+ ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz'
504
+ omat_name = f'{glmsingle_path}/boldref_omat'
505
+
506
+
507
+ # In[15]:
508
+
509
+
510
+ from nilearn.plotting import plot_roi, plot_anat, plot_epi
511
+
512
+ mask_name = f'{glmsingle_path}/{sub}_{session_label}{task_name}_brain'
513
+ if resample_voxel_size:
514
+ if resample_post_glmsingle is True:
515
+ # use original mask directory
516
+ mask_in_name = f'{orig_glmsingle_path}/{sub}_{session}{task_name}_brain.nii.gz'
517
+ mask_out_name = mask_name + f"_{mask_resampled_suffix}.nii.gz"
518
+ assert os.path.exists(mask_in_name)
519
+ applyxfm(mask_in_name, ref_name, omat_name, resample_method, output=mask_out_name)
520
+ apply_thresh(mask_out_name, 0.5, output=mask_out_name) # binarize the mask since resampling can produce values other than 0 or 1
521
+ mask_name += f"_{mask_resampled_suffix}"
522
+
523
+ mask_name += ".nii.gz"
524
+ print(mask_name)
525
+ avg_mask = nib.load(mask_name)
526
+ # mask info
527
+ dimsize=avg_mask.header.get_zooms()
528
+ affine_mat = avg_mask.affine
529
+ brain=avg_mask.get_fdata()
530
+ xyz=brain.shape #xyz dimensionality of brain mask and epi data
531
+
532
+ print('Mask dimensions:', dimsize)
533
+ print('')
534
+ print('Affine:')
535
+ print(affine_mat)
536
+ print('')
537
+ print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n')
538
+
539
+
540
+ # In[16]:
541
+
542
+
543
+ from nilearn.plotting import plot_roi
544
+ assert sub == 'sub-005' and session == "ses-03"
545
+ print('loading brain mask')
546
+ # func_masks, avg_mask, nsd_masks, roi = utils.get_mask(['ses-01', 'ses-02', 'ses-03'], sub, func_task_name)
547
+ avg_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_brain.nii.gz')
548
+ final_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_mask.nii.gz')
549
+
550
+ # mask info
551
+ dimsize=avg_mask.header.get_zooms()
552
+ affine_mat = avg_mask.affine
553
+ brain=avg_mask.get_fdata()
554
+ xyz=brain.shape #xyz dimensionality of brain mask and epi data
555
+
556
+ print('Mask dimensions:', dimsize)
557
+ print('')
558
+ print('Affine:')
559
+ print(affine_mat)
560
+ print('')
561
+ print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n')
562
+
563
+ plot_roi(final_mask, bg_img=avg_mask)
564
+ plt.show()
565
+
566
+
567
+ # In[17]:
568
+
569
+
570
+ union_mask = np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_ses-01-02.npy')
571
+
572
+
573
+ # ## Load GLMSingle voxel data
574
+
575
+ # In[18]:
576
+
577
+
578
+ vox = None
579
+ needs_postprocessing = False
580
+ params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx)
581
+
582
+ if resample_post_glmsingle == True:
583
+ glm_save_path_resampled = f"{glmsingle_path}/vox_resampled.nii.gz"
584
+ if load_from_resampled_file == True:
585
+ # resampling was done in this notebook so we can load from file
586
+ vox = nib.load(glm_save_path_resampled)
587
+ else:
588
+ # do resampling here
589
+ assert os.path.exists(ref_name) and os.path.exists(omat_name), "need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs"
590
+ vox = load_preprocess_betas(orig_glmsingle_path, *params)
591
+ vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name)
592
+ needs_postprocessing = True
593
+
594
+ if vox is None:
595
+ # either resampling was done in glmsingle or we aren't resampling
596
+ vox = load_preprocess_betas(glmsingle_path, *params)
597
+
598
+ if needs_postprocessing == True:
599
+ vox = apply_mask(vox, avg_mask)
600
+ vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images)
601
+ print(vox.shape)
602
+
603
+ assert len(vox) == len(image_idx)
604
+
605
+
606
+ # In[19]:
607
+
608
+
609
+ ses_mask = nib.load(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{session_label}_task-C/sub-005_{session_label}_task-C_brain.nii.gz')
610
+ assert np.all(ses_mask.affine == final_mask.affine)
611
+ assert np.all(ses_mask.shape == final_mask.shape)
612
+
613
+
614
+ # In[20]:
615
+
616
+
617
+ # get vox into the same shape as the union mask
618
+ v = nilearn.masking.unmask(vox, ses_mask) # move back to 3D based on own session mask
619
+ # final_mask = nilearn.masking.intersect_masks([avg_mask, roi])
620
+ vox = nilearn.masking.apply_mask(v, final_mask) # re-flatten based on final mask so everything is in the same shape now
621
+ print(vox.shape)
622
+ vox = vox[:, union_mask]
623
+ print("applied union roi mask")
624
+ print(vox.shape)
625
+
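+ # Shape flow of the re-masking above (illustrative summary; exact voxel counts depend on the masks):
+ # vox (n_trials, n_session_voxels) --unmask(ses_mask)--> 4D volume
+ #     --apply_mask(final_mask)--> (n_trials, n_final_voxels)
+ #     --[:, union_mask]--> (n_trials, n_union_voxels)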
626
+
627
+ # ## Reliability calculation
628
+
629
+ # ### Calculate reliability (corr between first and second presentation of same image) for every voxel
630
+
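+ # A minimal vectorized sketch of the same-pair correlation computed below (illustrative only;
+ # assumes `vox` is (n_trials, n_voxels) and `pairs_homog` is (n_pairs, 2) as built in the next cell):
+ #
+ # reps = vox[pairs_homog]                               # (n_pairs, 2, n_voxels)
+ # a, b = reps[:, 0], reps[:, 1]
+ # az = (a - a.mean(1, keepdims=True)) / a.std(1, keepdims=True)
+ # bz = (b - b.mean(1, keepdims=True)) / b.std(1, keepdims=True)
+ # same_corrs_vec = (az * bz).mean(1)                    # Pearson r per image pair, across voxels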
631
+ # In[21]:
632
+
633
+
634
+ pairs_homog = np.array([[p[0], p[1]] for p in pairs])
635
+
636
+
637
+ # In[22]:
638
+
639
+
640
+ same_corrs = []
641
+ diff_corrs = []
642
+ for isamp, samp in enumerate(vox[pairs_homog]):
643
+ avg_same_img = []
644
+ for i in range(samp.shape[0]):
645
+ for j in range(i, samp.shape[0]):
646
+ if i != j:
647
+ avg_same_img.append(np.array([np.corrcoef(samp[i, :], samp[j, :])[0,1]]))
648
+
649
+ same_corrs.append(np.mean(avg_same_img))
650
+
651
+ avg_diff_img = []
652
+ for isamp_j, samp_j in enumerate(vox[pairs_homog]):
653
+ if isamp_j != isamp:
654
+ for i in range(samp_j.shape[0]):
655
+ for j in range(i, samp_j.shape[0]):
656
+ if i != j:
657
+ avg_diff_img.append(np.array([np.corrcoef(samp[i, :], samp_j[j, :])[0,1]]))
658
+
659
+ # print(len(avg_diff_img))
660
+ diff_corrs.append(np.mean(avg_diff_img))
661
+
662
+
663
+ print(len(same_corrs), len(diff_corrs))
664
+ same_corrs = np.array(same_corrs)
665
+ diff_corrs = np.array(diff_corrs)
666
+
667
+
668
+ plt.figure(figsize=(5,4))
669
+ plt.title(f"{sub}_{session} same/diff Pearson corr.")
670
+ plt.plot(np.sort(same_corrs),c='blue',label='same')
671
+ plt.plot(np.sort(diff_corrs),c='cyan',label='diff')
672
+ plt.axhline(0,c='k',ls='--')
673
+ plt.legend()
674
+ plt.xlabel("sample")
675
+ plt.ylabel("Pearson R")
676
+ plt.show()
677
+
678
+
679
+ # In[23]:
680
+
681
+
682
+ vox_pairs = utils.zscore(vox[pairs_homog])
683
+ plt.figure(figsize=(5,4))
684
+ plt.title(f"{sub}_{session} same minus diff difference Pearson corr.")
685
+ plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference')
686
+ plt.axhline(0,c='k',ls='--')
687
+ plt.legend()
688
+ plt.xlabel("sample")
689
+ plt.ylabel("Pearson R")
690
+ plt.show()
691
+
692
+
693
+ # # Training MindEye
694
+
695
+ # In[24]:
696
+
697
+
698
+ utils.seed_everything(seed)
699
+
700
+ if train_test_split == 'orig':
701
+ # train = all images except images that were repeated
702
+ # test = average of the same-image presentations
703
+ imageTrain = np.arange(len(images))
704
+ train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])
705
+ test_image_indices = pairs
706
+ print(len(train_image_indices), len(test_image_indices))
707
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
708
+ elif train_test_split == 'MST':
709
+ # non-MST images are the train split
710
+ # MST images are the test split
711
+ MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k])
712
+ non_MST_idx = [v for k,v in image_to_indices.items() if 'MST_pairs' not in k]
713
+ non_MST_idx = np.array([z for y in non_MST_idx for x in y for z in x]) # flatten the indices
714
+ train_image_indices = non_MST_idx
715
+ test_image_indices = MST_idx.flatten() # MST_idx contains the mapping for the different test sets; test_image_indices has all MST indices combined
716
+ print(len(train_image_indices), len(test_image_indices))
717
+ assert len(train_image_indices) + len(test_image_indices) == len(vox)
718
+ elif train_test_split == 'unique':
719
+ imageTest = np.arange(len(images))
720
+ train_image_indices = pairs.flatten()
721
+ test_image_indices = np.array([item for item in imageTest if item not in pairs.flatten()])
722
+ print(len(train_image_indices), len(test_image_indices))
723
+ assert len(train_image_indices) + len(test_image_indices) == len(image_idx)
724
+ else:
725
+ raise Exception("invalid train_test_split")
726
+
727
+ # TODO add assertion that verifies file names in train and test don't overlap, guards against repeats (a possible check is sketched after the loop below)
728
+
729
+ for i in train_image_indices:
730
+ assert i not in test_image_indices
731
+
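+ # One way to implement the TODO above (illustrative sketch; assumes `image_names` is aligned
+ # with the same trial indexing used for `vox`, which should be verified before relying on it):
+ # train_names = {str(image_names[i]) for i in train_image_indices}
+ # test_names  = {str(image_names[i]) for i in test_image_indices}
+ # assert train_names.isdisjoint(test_names), "train/test image filenames overlap!"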
732
+
733
+ # In[25]:
734
+
735
+
736
+ train_mean = np.mean(vox[train_image_indices],axis=0)
737
+ train_std = np.std(vox[train_image_indices],axis=0)
738
+
739
+ vox = utils.zscore(vox,train_mean=train_mean,train_std=train_std)
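+ # Note: mean/std are computed from the training trials only, so test trials are standardized
+ # with training statistics rather than their own (avoids leaking test-set statistics).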
740
+ print("voxels have been zscored")
741
+ print(vox[:,0].mean(), vox[:,0].std())
742
+ print("vox", vox.shape)
743
+
744
+
745
+ # In[26]:
746
+
747
+
748
+ # for idx in deleted_indices:
749
+ # # check image names to be deleted match
750
+ # original_name = vox_image_dict[idx]
751
+ # matching_indices = [i for i in deleted_indices if vox_image_dict[i] == original_name]
752
+ # assert all(vox_image_dict[i] == original_name for i in matching_indices), \
753
+ # f"Mismatch in image names for deleted indices {matching_indices}"
754
+
755
+ # # check image data to be deleted match
756
+ # base_image = images[matching_indices[0]] # Reference image
757
+ # for i in matching_indices[1:]:
758
+ # assert np.array_equal(base_image, images[i]), \
759
+ # f"Mismatch in image data for {vox_image_dict[i]} at index {i}"
760
+
761
+ # images = images[kept_indices]
762
+
763
+
764
+ # In[27]:
765
+
766
+
767
+ images = torch.Tensor(images)
768
+ vox = torch.Tensor(vox)
769
+ assert len(images) == len(vox)
770
+
771
+
772
+ # In[28]:
773
+
774
+
775
+ ### Multi-GPU config ###
776
+ from accelerate import Accelerator, DeepSpeedPlugin
777
+
778
+ local_rank = os.getenv('RANK')
779
+ if local_rank is None:
780
+ local_rank = 0
781
+ else:
782
+ local_rank = int(local_rank)
783
+ print("LOCAL RANK ", local_rank)
784
+
785
+ data_type = torch.float32 # change depending on your mixed_precision
786
+
787
+ accelerator = Accelerator(split_batches=False)
788
+ batch_size = 8
789
+
790
+
791
+ # In[29]:
792
+
793
+
794
+ print("PID of this process =",os.getpid())
795
+ device = accelerator.device
796
+ print("device:",device)
797
+ world_size = accelerator.state.num_processes
798
+ distributed = not accelerator.state.distributed_type == 'NO'
799
+ num_devices = torch.cuda.device_count()
800
+ global_batch_size = batch_size * num_devices
801
+ print("global_batch_size", global_batch_size)
802
+ if num_devices==0 or not distributed: num_devices = 1
803
+ num_workers = num_devices
804
+ print(accelerator.state)
805
+
806
+ # set data_type to match your mixed precision (automatically set based on deepspeed config)
807
+ if accelerator.mixed_precision == "bf16":
808
+ data_type = torch.bfloat16
809
+ elif accelerator.mixed_precision == "fp16":
810
+ data_type = torch.float16
811
+ else:
812
+ data_type = torch.float32
813
+
814
+ print("distributed =",distributed, "num_devices =", num_devices, "local rank =", local_rank, "world size =", world_size, "data_type =", data_type)
815
+ print = accelerator.print # only print if local_rank=0
816
+
817
+
818
+ # ## Configurations
819
+
820
+ # In[30]:
821
+
822
+
823
+ # if running this interactively, can specify jupyter_args here for argparser to use
824
+ if utils.is_interactive():
825
+ model_name = 'testing_MST' # 'sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0'
826
+ print("model_name:", model_name)
827
+
828
+ # global_batch_size and batch_size should already be defined in the above cells
829
+ # other variables can be specified in the following string:
830
+ # jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name={model_name}"
831
+
832
+ jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \
833
+ --model_name={model_name} \
834
+ --no-multi_subject --subj=1 --batch_size={batch_size} \
835
+ --hidden_dim=1024 --clip_scale=1. \
836
+ --no-blurry_recon --blur_scale=.5 \
837
+ --no-use_prior --prior_scale=30 \
838
+ --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \
839
+ --ckpt_interval=999 --no-ckpt_saving --new_test \
840
+ --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep"
841
+ print(jupyter_args)
842
+ jupyter_args = jupyter_args.split()
843
+
844
+
845
+ # In[31]:
846
+
847
+
848
+ parser = argparse.ArgumentParser(description="Model Training Configuration")
849
+ parser.add_argument(
850
+ "--model_name", type=str, default="testing",
851
+ help="name of model, used for ckpt saving and wandb logging (if enabled)",
852
+ )
853
+ parser.add_argument(
854
+ "--data_path", type=str, default="/weka/proj-fmri/shared/natural-scenes-dataset",
855
+ help="Path to where NSD data is stored / where to download it to",
856
+ )
857
+ parser.add_argument(
858
+ "--subj",type=int, default=1, choices=[1,2,3,4,5,6,7,8],
859
+ help="Validate on which subject?",
860
+ )
861
+ parser.add_argument(
862
+ "--multisubject_ckpt", type=str, default=None,
863
+ help="Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.",
864
+ )
865
+ parser.add_argument(
866
+ "--num_sessions", type=int, default=0,
867
+ help="Number of training sessions to include (if multi_subject, this variable doesnt matter)",
868
+ )
869
+ parser.add_argument(
870
+ "--use_prior",action=argparse.BooleanOptionalAction,default=False,
871
+ help="whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)",
872
+ )
873
+ parser.add_argument(
874
+ "--batch_size", type=int, default=32,
875
+ help="Batch size can be increased by 10x if only training v2c and not diffusion diffuser",
876
+ )
877
+ parser.add_argument(
878
+ "--wandb_log",action=argparse.BooleanOptionalAction,default=False,
879
+ help="whether to log to wandb",
880
+ )
881
+ parser.add_argument(
882
+ "--resume_from_ckpt",action=argparse.BooleanOptionalAction,default=False,
883
+ help="if not using wandb and want to resume from a ckpt",
884
+ )
885
+ parser.add_argument(
886
+ "--wandb_project",type=str,default="stability",
887
+ help="wandb project name",
888
+ )
889
+ parser.add_argument(
890
+ "--mixup_pct",type=float,default=.33,
891
+ help="proportion of way through training when to switch from BiMixCo to SoftCLIP",
892
+ )
893
+ parser.add_argument(
894
+ "--low_mem",action=argparse.BooleanOptionalAction,default=False,
895
+ help="whether to preload images to cpu to speed things up but consume more memory",
896
+ )
897
+ parser.add_argument(
898
+ "--blurry_recon",action=argparse.BooleanOptionalAction,default=True,
899
+ help="whether to output blurry reconstructions",
900
+ )
901
+ parser.add_argument(
902
+ "--blur_scale",type=float,default=.5,
903
+ help="multiply loss from blurry recons by this number",
904
+ )
905
+ parser.add_argument(
906
+ "--clip_scale",type=float,default=1.,
907
+ help="multiply contrastive loss by this number",
908
+ )
909
+ parser.add_argument(
910
+ "--prior_scale",type=float,default=30,
911
+ help="multiply diffusion prior loss by this",
912
+ )
913
+ parser.add_argument(
914
+ "--use_image_aug",action=argparse.BooleanOptionalAction,default=True,
915
+ help="whether to use image augmentation",
916
+ )
917
+ parser.add_argument(
918
+ "--num_epochs",type=int,default=120,
919
+ help="number of epochs of training",
920
+ )
921
+ parser.add_argument(
922
+ "--multi_subject",action=argparse.BooleanOptionalAction,default=False,
923
+ )
924
+ parser.add_argument(
925
+ "--new_test",action=argparse.BooleanOptionalAction,default=True,
926
+ )
927
+ parser.add_argument(
928
+ "--n_blocks",type=int,default=2,
929
+ )
930
+ parser.add_argument(
931
+ "--hidden_dim",type=int,default=1024,
932
+ )
933
+ parser.add_argument(
934
+ "--seq_past",type=int,default=0,
935
+ )
936
+ parser.add_argument(
937
+ "--seq_future",type=int,default=0,
938
+ )
939
+ parser.add_argument(
940
+ "--lr_scheduler_type",type=str,default='cycle',choices=['cycle','linear'],
941
+ )
942
+ parser.add_argument(
943
+ "--ckpt_saving",action=argparse.BooleanOptionalAction,default=True,
944
+ )
945
+ parser.add_argument(
946
+ "--ckpt_interval",type=int,default=5,
947
+ help="save backup ckpt and reconstruct every x epochs",
948
+ )
949
+ parser.add_argument(
950
+ "--seed",type=int,default=42,
951
+ )
952
+ parser.add_argument(
953
+ "--max_lr",type=float,default=3e-4,
954
+ )
955
+
956
+ if utils.is_interactive():
957
+ args = parser.parse_args(jupyter_args)
958
+ else:
959
+ args = parser.parse_args()
960
+
961
+ # create global variables without the args prefix
962
+ for attribute_name in vars(args).keys():
963
+ globals()[attribute_name] = getattr(args, attribute_name)
964
+
965
+ outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')
966
+ if not os.path.exists(outdir) and ckpt_saving:
967
+ os.makedirs(outdir,exist_ok=True)
968
+
969
+ if use_image_aug or blurry_recon:
970
+ import kornia
971
+ import kornia.augmentation as K
972
+ from kornia.augmentation.container import AugmentationSequential
973
+ if use_image_aug:
974
+ img_augment = AugmentationSequential(
975
+ kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),
976
+ same_on_batch=False,
977
+ data_keys=["input"],
978
+ )
979
+ # Define the blurring augmentations
980
+ blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)
981
+
982
+ if multi_subject:
983
+ subj_list = np.arange(1,9)
984
+ subj_list = subj_list[subj_list != subj]
985
+ else:
986
+ subj_list = [subj]
987
+
988
+ print("subj_list", subj_list, "num_sessions", num_sessions)
989
+
990
+
991
+ # ## Prep data, models, and dataloaders
992
+
993
+ # In[32]:
994
+
995
+
996
+ if ckpt_saving:
997
+ # save MST_ID for 2-alternative forced-choice retrieval evaluation
998
+ if 'MST' in model_name:
999
+ eval_dir = os.environ["eval_dir"]
1000
+ print('saving MST info in', eval_dir)
1001
+ # Saving ##
1002
+ if not os.path.exists(eval_dir):
1003
+ os.mkdir(eval_dir)
1004
+
1005
+ np.save(f"{eval_dir}/MST_ID.npy", MST_ID)
1006
+ np.save(f"{eval_dir}/MST_pairmate_indices.npy", MST_pairmate_indices)
1007
+
1008
+ if remove_random_n:
1009
+ np.save(f"{eval_dir}/imgs_to_remove.npy", imgs_to_remove)
1010
+
1011
+ np.save(f"{eval_dir}/train_image_indices.npy", train_image_indices)
1012
+ np.save(f"{eval_dir}/test_image_indices.npy", test_image_indices)
1013
+ np.save(f"{eval_dir}/images.npy", images)
1014
+ np.save(f"{eval_dir}/vox.npy", vox)
1015
+
1016
+
1017
+ # ### Creating wds dataloader, preload betas and all 73k possible images
1018
+
1019
+ # In[33]:
1020
+
1021
+
1022
+ def my_split_by_node(urls): return urls
1023
+ num_voxels_list = []
1024
+
1025
+ if multi_subject:
1026
+ nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])
1027
+ num_samples_per_epoch = (750*40) // num_devices
1028
+ else:
1029
+ # num_samples_per_epoch = (750*num_sessions) // num_devices
1030
+ num_samples_per_epoch = len(train_image_indices)
1031
+
1032
+ print("dividing batch size by subj_list, which will then be concatenated across subj during training...")
1033
+ batch_size = batch_size // len(subj_list)
1034
+
1035
+ num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))
1036
+
1037
+ print("batch_size =", batch_size, "num_iterations_per_epoch =",num_iterations_per_epoch, "num_samples_per_epoch =",num_samples_per_epoch)
1038
+
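+ # Worked example of the accounting above (hypothetical numbers): with a single subject,
+ # batch_size = 8 and 504 training trials, num_iterations_per_epoch = 504 // (8 * 1) = 63,
+ # i.e., 63 optimizer steps per epoch; any remainder trials are dropped by the floor division.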
1039
+
1040
+ # In[34]:
1041
+
1042
+
1043
+ train_data = {}
1044
+ train_dl = {}
1045
+
1046
+ train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))
1047
+ test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))
1048
+
1049
+
1050
+ # In[35]:
1051
+
1052
+
1053
+ num_voxels = {}
1054
+ voxels = {}
1055
+ for s in subj_list:
1056
+ print(f"Training with {num_sessions} sessions")
1057
+ train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)
1058
+
1059
+ num_voxels_list.append(vox[0].shape[-1])
1060
+ num_voxels[f'subj0{s}'] = vox[0].shape[-1]
1061
+ voxels[f'subj0{s}'] = vox
1062
+ print(f"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}")
1063
+
1064
+ print("Loaded all subj train dls and vox!\n")
1065
+
1066
+ # Validate only on one subject
1067
+ if multi_subject:
1068
+ subj = subj_list[0] # can't validate on the actual held-out subject, so pick the first in subj_list
1069
+ test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True)
1070
+
1071
+ print(f"Loaded test dl for subj{subj}!\n")
1072
+
1073
+
1074
+ # ## Load models
1075
+
1076
+ # ### CLIP image embeddings model
1077
+
1078
+ # In[36]:
1079
+
1080
+
1081
+ ## USING OpenCLIP ViT-bigG ###
1082
+ sys.path.append('generative_models/')
1083
+ import sgm
1084
+ from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder
1085
+ # from generative_models.sgm.models.diffusion import DiffusionEngine
1086
+ # from omegaconf import OmegaConf
1087
+
1088
+ try:
1089
+ print(clip_img_embedder)
1090
+ except NameError:  # clip_img_embedder not defined yet
1091
+ clip_img_embedder = FrozenOpenCLIPImageEmbedder(
1092
+ arch="ViT-bigG-14",
1093
+ version="laion2b_s39b_b160k",
1094
+ output_tokens=True,
1095
+ only_tokens=True,
1096
+ )
1097
+ clip_img_embedder.to(device)
1098
+ clip_seq_dim = 256
1099
+ clip_emb_dim = 1664
1100
+
1101
+ # ## USING OPEN AI CLIP ViT-L ###
1102
+ # import clip
1103
+ # try:
1104
+ # print(clip_model)
1105
+ # except:
1106
+ # clip_model, preprocess = clip.load("ViT-L/14", device=device)
1107
+ # preprocess = transforms.Compose([
1108
+ # transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),
1109
+ # transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
1110
+ # std=[0.26862954, 0.26130258, 0.27577711]),
1111
+ # ])
1112
+ # def clip_img_embedder(image):
1113
+ # preproc_img = preprocess(image)
1114
+ # return clip_model.encode_image(preproc_img)
1115
+ # clip_seq_dim = 1
1116
+ # clip_emb_dim = 768
1117
+
1118
+
1119
+ # ### MindEye modules
1120
+
1121
+ # In[37]:
1122
+
1123
+
1124
+ model = utils.prepare_model_and_training(
1125
+ num_voxels_list=num_voxels_list,
1126
+ n_blocks=n_blocks,
1127
+ hidden_dim=hidden_dim,
1128
+ clip_emb_dim=clip_emb_dim,
1129
+ clip_seq_dim=clip_seq_dim,
1130
+ use_prior=use_prior,
1131
+ clip_scale=clip_scale
1132
+ )
1133
+
1134
+
1135
+ # In[38]:
1136
+
1137
+
1138
+ # test on subject 1 with fake data
1139
+ b = torch.randn((2,1,num_voxels_list[0]))
1140
+ print(b.shape, model.ridge(b,0).shape)
1141
+
1142
+
1143
+ # In[39]:
1144
+
1145
+
1146
+ # test that the model works on some fake data
1147
+ b = torch.randn((2,1,hidden_dim))
1148
+ print("b.shape",b.shape)
1149
+
1150
+ backbone_, clip_, blur_ = model.backbone(b)
1151
+ print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)
1152
+
1153
+
1154
+ # ### Adding diffusion prior + unCLIP if use_prior=True
1155
+
1156
+ # In[40]:
1157
+
1158
+
1159
+ if use_prior:
1160
+ from models import *
1161
+
1162
+ # setup diffusion prior network
1163
+ out_dim = clip_emb_dim
1164
+ depth = 6
1165
+ dim_head = 52
1166
+ heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim
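+ # e.g., with clip_emb_dim = 1664: heads = 1664 // 52 = 32, and 32 * 52 = 1664 as required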
1167
+ timesteps = 100
1168
+
1169
+ prior_network = VersatileDiffusionPriorNetwork(
1170
+ dim=out_dim,
1171
+ depth=depth,
1172
+ dim_head=dim_head,
1173
+ heads=heads,
1174
+ causal=False,
1175
+ num_tokens = clip_seq_dim,
1176
+ learned_query_mode="pos_emb"
1177
+ )
1178
+
1179
+ model.diffusion_prior = BrainDiffusionPrior(
1180
+ net=prior_network,
1181
+ image_embed_dim=out_dim,
1182
+ condition_on_text_encodings=False,
1183
+ timesteps=timesteps,
1184
+ cond_drop_prob=0.2,
1185
+ image_embed_scale=None,
1186
+ )
1187
+
1188
+ utils.count_params(model.diffusion_prior)
1189
+ utils.count_params(model)
1190
+
1191
+
1192
+ # ### Setup optimizer / lr / ckpt saving
1193
+
1194
+ # In[41]:
1195
+
1196
+
1197
+ no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
1198
+
1199
+ opt_grouped_parameters = [
1200
+ {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},
1201
+ {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1202
+ {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
1203
+ ]
1204
+ # model.backbone.requires_grad_(False)
1205
+
1206
+ if use_prior:
1207
+ opt_grouped_parameters.extend([
1208
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1209
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
1210
+ ])
1211
+
1212
+ optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)
1213
+
1214
+ if lr_scheduler_type == 'linear':
1215
+ lr_scheduler = torch.optim.lr_scheduler.LinearLR(
1216
+ optimizer,
1217
+ total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),
1218
+ last_epoch=-1
1219
+ )
1220
+ elif lr_scheduler_type == 'cycle':
1221
+ if num_iterations_per_epoch==0:
1222
+ num_iterations_per_epoch=1
1223
+ total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))
1224
+ print("total_steps", total_steps)
1225
+ lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
1226
+ optimizer,
1227
+ max_lr=max_lr,
1228
+ total_steps=total_steps,
1229
+ final_div_factor=1000,
1230
+ last_epoch=-1, pct_start=2/num_epochs
1231
+ )
1232
+
1233
+ def save_ckpt(tag):
1234
+ ckpt_path = outdir+f'/{tag}.pth'
1235
+ if accelerator.is_main_process:
1236
+ unwrapped_model = accelerator.unwrap_model(model)
1237
+ torch.save({
1238
+ 'epoch': epoch,
1239
+ 'model_state_dict': unwrapped_model.state_dict(),
1240
+ 'optimizer_state_dict': optimizer.state_dict(),
1241
+ 'lr_scheduler': lr_scheduler.state_dict(),
1242
+ 'train_losses': losses,
1243
+ 'test_losses': test_losses,
1244
+ 'lrs': lrs,
1245
+ }, ckpt_path)
1246
+ print(f"\n---saved {outdir}/{tag} ckpt!---\n")
1247
+
1248
+ def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False):
1249
+ print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
1250
+ checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')
1251
+ state_dict = checkpoint['model_state_dict']
1252
+ if multisubj_loading: # remove incompatible ridge layer that will otherwise error
1253
+ state_dict.pop('ridge.linears.0.weight',None)
1254
+ model.load_state_dict(state_dict, strict=strict)
1255
+ if load_epoch:
1256
+ globals()["epoch"] = checkpoint['epoch']
1257
+ print("Epoch",epoch)
1258
+ if load_optimizer:
1259
+ optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
1260
+ if load_lr:
1261
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
1262
+ del checkpoint
1263
+
1264
+ print("\nDone with model preparations!")
1265
+ num_params = utils.count_params(model)
1266
+
1267
+
1268
+ # # Wandb
1269
+
1270
+ # In[42]:
1271
+
1272
+
1273
+ if local_rank==0 and wandb_log: # only use main process for wandb logging
1274
+ import wandb
1275
+ import time
1276
+
1277
+ wandb_project = 'rtmindeye'
1278
+ print(f"wandb {wandb_project} run {model_name}")
1279
+
1280
+ # Need to configure wandb beforehand in terminal with "wandb init"!
1281
+ wandb_config = {
1282
+ "model_name": model_name,
1283
+ "global_batch_size": global_batch_size,
1284
+ "batch_size": batch_size,
1285
+ "num_epochs": num_epochs,
1286
+ "num_sessions": num_sessions,
1287
+ "num_params": num_params,
1288
+ "clip_scale": clip_scale,
1289
+ "prior_scale": prior_scale,
1290
+ "blur_scale": blur_scale,
1291
+ "use_image_aug": use_image_aug,
1292
+ "max_lr": max_lr,
1293
+ "mixup_pct": mixup_pct,
1294
+ "num_samples_per_epoch": num_samples_per_epoch,
1295
+ "ckpt_interval": ckpt_interval,
1296
+ "ckpt_saving": ckpt_saving,
1297
+ "seed": seed, # SLURM array task ID
1298
+ "distributed": distributed,
1299
+ "num_devices": num_devices,
1300
+ "world_size": world_size,
1301
+ }
1302
+ print("wandb_config:\n", wandb_config)
1303
+ print("wandb_id:", model_name)
1304
+
1305
+ # Initialize wandb
1306
+ wandb.init(
1307
+ id=model_name,
1308
+ project=wandb_project,
1309
+ name=model_name,
1310
+ config=wandb_config,
1311
+ resume="allow",
1312
+ save_code=True,
1313
+ )
1314
+
1315
+ # Get SLURM job & array ID
1316
+ slurm_job_id = utils.get_slurm_job()
1317
+ slurm_array_id = seed # seed corresponds to SLURM_ARRAY_TASK_ID
1318
+
1319
+ # Define SLURM log paths
1320
+ log_dir = "slurms"
1321
+ log_files = [
1322
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.out",
1323
+ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.err",
1324
+ ]
1325
+
1326
+ # Ensure logs exist before logging them
1327
+ for log_file in log_files:
1328
+ wait_time = 0
1329
+ while not os.path.exists(log_file) and wait_time < 60: # Wait max 60s
1330
+ time.sleep(5)
1331
+ wait_time += 5
1332
+
1333
+ # Log SLURM logs as artifacts
1334
+ artifact = wandb.Artifact(f"slurm_logs_{slurm_job_id}_{slurm_array_id}", type="logs")
1335
+ for log_file in log_files:
1336
+ if os.path.exists(log_file):
1337
+ artifact.add_file(log_file)
1338
+
1339
+ wandb.log_artifact(artifact)
1340
+ else:
1341
+ wandb_log = False
1342
+
1343
+
1344
+ # # Train the model
1345
+
1346
+ # In[43]:
1347
+
1348
+
1349
+ epoch = 0
1350
+ losses, test_losses, lrs = [], [], []
1351
+ best_test_loss = 1e9
1352
+ torch.cuda.empty_cache()
1353
+
1354
+
1355
+ # In[44]:
1356
+
1357
+
1358
+ # load multisubject stage1 ckpt if set
1359
+ if multisubject_ckpt is not None and not resume_from_ckpt:
1360
+ load_ckpt("last",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)
1361
+
1362
+
1363
+ # In[45]:
1364
+
1365
+
1366
+ # checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')
1367
+ # state_dict = checkpoint['model_state_dict']
1368
+ # model.load_state_dict(state_dict, strict=False)
1369
+
1370
+
1371
+ # In[46]:
1372
+
1373
+
1374
+ # train_dls = [train_dl[f'subj0{s}'] for s in subj_list]
1375
+
1376
+ model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)
1377
+ # leaving out test_dl since we will only have local_rank 0 device do evals
1378
+
1379
+
1380
+ # In[56]:
1381
+
1382
+
1383
+ print(f"{model_name} starting with epoch {epoch} / {num_epochs}")
1384
+ progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0))
1385
+ test_image, test_voxel = None, None
1386
+ mse = nn.MSELoss()
1387
+ l1 = nn.L1Loss()
1388
+ soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs))
1389
+ skip_train = True if epoch>=(num_epochs-1) else False # skip training if you are resuming from a fully trained model
1390
+
1391
+ for epoch in progress_bar:
1392
+ model.train()
1393
+
1394
+ fwd_percent_correct = 0.
1395
+ bwd_percent_correct = 0.
1396
+ test_fwd_percent_correct = 0.
1397
+ test_bwd_percent_correct = 0.
1398
+
1399
+ recon_cossim = 0.
1400
+ test_recon_cossim = 0.
1401
+ recon_mse = 0.
1402
+ test_recon_mse = 0.
1403
+
1404
+ loss_clip_total = 0.
1405
+ loss_blurry_total = 0.
1406
+ loss_blurry_cont_total = 0.
1407
+ test_loss_clip_total = 0.
1408
+
1409
+ loss_prior_total = 0.
1410
+ test_loss_prior_total = 0.
1411
+
1412
+ blurry_pixcorr = 0.
1413
+ test_blurry_pixcorr = 0.
1414
+
1415
+ # you now have voxel_iters and image_iters with num_iterations_per_epoch batches each
1416
+ for train_i, behav in enumerate(train_dl):
1417
+ with torch.cuda.amp.autocast(dtype=data_type):
1418
+ optimizer.zero_grad()
1419
+ loss = 0.
1420
+
1421
+ behav = behav[0]
1422
+
1423
+ image = images[behav.long().cpu()].to(device)
1424
+ voxel = vox[behav.long().cpu()]
1425
+ # voxel = (voxel - train_mean) / train_std
1426
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1427
+
1428
+ if use_image_aug:
1429
+ image = img_augment(image)
1430
+
1431
+ clip_target = clip_img_embedder(image)
1432
+ assert not torch.any(torch.isnan(clip_target))
1433
+
1434
+ if epoch < int(mixup_pct * num_epochs):
1435
+ voxel, perm, betas, select = utils.mixco(voxel)
1436
+
1437
+ voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)]
1438
+ # voxel_ridge = torch.cat(voxel_ridge_list, dim=0)
1439
+
1440
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1441
+
1442
+ if clip_scale>0:
1443
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1444
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1445
+
1446
+ if use_prior:
1447
+ loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target)
1448
+ loss_prior_total += loss_prior.item()
1449
+ loss_prior *= prior_scale
1450
+ loss += loss_prior
1451
+
1452
+ recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item()
1453
+ recon_mse += mse(prior_out, clip_target).item()
1454
+
1455
+ if clip_scale>0:
1456
+ if epoch < int(mixup_pct * num_epochs):
1457
+ loss_clip = utils.mixco_nce(
1458
+ clip_voxels_norm,
1459
+ clip_target_norm,
1460
+ temp=.006,
1461
+ perm=perm, betas=betas, select=select)
1462
+ else:
1463
+ epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)]
1464
+ loss_clip = utils.soft_clip_loss(
1465
+ clip_voxels_norm,
1466
+ clip_target_norm,
1467
+ temp=epoch_temp)
1468
+
1469
+ loss_clip_total += loss_clip.item()
1470
+ loss_clip *= clip_scale
1471
+ loss += loss_clip
1472
+
1473
+ if blurry_recon:
1474
+ image_enc_pred, transformer_feats = blurry_image_enc_
1475
+
1476
+ image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215
1477
+ loss_blurry = l1(image_enc_pred, image_enc)
1478
+ loss_blurry_total += loss_blurry.item()
1479
+
1480
+ if epoch < int(mixup_pct * num_epochs):
1481
+ image_enc_shuf = image_enc[perm]
1482
+ betas_shape = [-1] + [1]*(len(image_enc.shape)-1)
1483
+ image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \
1484
+ image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)
1485
+
1486
+ image_norm = (image - mean)/std
1487
+ image_aug = (blur_augs(image) - mean)/std
1488
+ _, cnx_embeds = cnx(image_norm)
1489
+ _, cnx_aug_embeds = cnx(image_aug)
1490
+
1491
+ cont_loss = utils.soft_cont_loss(
1492
+ nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),
1493
+ nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1494
+ nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
1495
+ temp=0.2)
1496
+ loss_blurry_cont_total += cont_loss.item()
1497
+
1498
+ loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215
1499
+
1500
+ if clip_scale>0:
1501
+ # forward and backward top 1 accuracy
1502
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1503
+ fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1504
+ bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1505
+
1506
+ if blurry_recon:
1507
+ with torch.no_grad():
1508
+ # only doing pixcorr eval on a subset of the samples per batch because its costly & slow to compute autoenc.decode()
1509
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)
1510
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1)
1511
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1512
+ blurry_pixcorr += pixcorr.item()
1513
+
1514
+ utils.check_loss(loss)
1515
+ accelerator.backward(loss)
1516
+ optimizer.step()
1517
+
1518
+ losses.append(loss.item())
1519
+ lrs.append(optimizer.param_groups[0]['lr'])
1520
+
1521
+ if lr_scheduler_type is not None:
1522
+ lr_scheduler.step()
1523
+
1524
+ if train_i >= num_iterations_per_epoch-1:
1525
+ break
1526
+
1527
+ model.eval()
1528
+ logs = {}
1529
+
1530
+ if local_rank == 0:
1531
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1532
+ for i in range(1):
1533
+ for j in range(2):
1534
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1535
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1536
+ subset_dl = torch.utils.data.DataLoader(
1537
+ subset_dataset, batch_size=24, shuffle=False,
1538
+ drop_last=True, pin_memory=True
1539
+ )
1540
+
1541
+ # Reset metrics for this subset
1542
+ test_losses = []
1543
+ test_loss_clip_total = 0
1544
+ test_loss_prior_total = 0
1545
+ test_blurry_pixcorr = 0
1546
+ test_fwd_percent_correct = 0
1547
+ test_bwd_percent_correct = 0
1548
+ test_recon_cossim = 0
1549
+ test_recon_mse = 0
1550
+
1551
+ for test_i, behav in enumerate(subset_dl):
1552
+ behav = behav[0]
1553
+ loss = 0.
1554
+
1555
+ if behav.ndim > 1:
1556
+ image = images[behav[:, 0].long().cpu()].to(device)
1557
+ voxel = vox[behav.long().cpu()].mean(1)
1558
+ else:
1559
+ image = images[behav.long().cpu()].to(device)
1560
+ voxel = vox[behav.long().cpu()]
1561
+
1562
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1563
+
1564
+ clip_img_embedder = clip_img_embedder.to(device)
1565
+ clip_target = clip_img_embedder(image.float())
1566
+
1567
+ voxel_ridge = model.ridge(voxel, 0)
1568
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1569
+
1570
+ if clip_scale > 0:
1571
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1572
+ clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)
1573
+
1574
+ random_samps = np.random.choice(np.arange(len(image)), size=len(image) // 5, replace=False)
1575
+
1576
+ if use_prior:
1577
+ loss_prior, contaminated_prior_out = model.diffusion_prior(
1578
+ text_embed=backbone[random_samps], image_embed=clip_target[random_samps])
1579
+ test_loss_prior_total += loss_prior.item()
1580
+ loss_prior *= prior_scale
1581
+ loss += loss_prior
1582
+
1583
+ if clip_scale > 0:
1584
+ loss_clip = utils.soft_clip_loss(
1585
+ clip_voxels_norm,
1586
+ clip_target_norm,
1587
+ temp=0.006
1588
+ )
1589
+ test_loss_clip_total += loss_clip.item()
1590
+ loss_clip *= clip_scale
1591
+ loss += loss_clip
1592
+
1593
+ if blurry_recon:
1594
+ image_enc_pred, _ = blurry_image_enc_
1595
+ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps] / 0.18215).sample / 2 + 0.5).clamp(0, 1)
1596
+ pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)
1597
+ test_blurry_pixcorr += pixcorr.item()
1598
+
1599
+ if clip_scale > 0:
1600
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1601
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1602
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1603
+
1604
+ utils.check_loss(loss)
1605
+ test_losses.append(loss.item())
1606
+
1607
+ logs.update({
1608
+ f"subset_{i}_{j}_test/loss": np.mean(test_losses),
1609
+ f"subset_{i}_{j}_test/loss_clip_total": test_loss_clip_total / (test_i + 1),
1610
+ f"subset_{i}_{j}_test/loss_prior": test_loss_prior_total / (test_i + 1),
1611
+ f"subset_{i}_{j}_test/blurry_pixcorr": test_blurry_pixcorr / (test_i + 1),
1612
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1613
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1614
+ })
1615
+ print(f"--- Subset ({i},{j}) ---")
1616
+ for k, v in logs.items():
1617
+ if f"subset_{i}_{j}" in k:
1618
+ print(f"{k}: {v:.4f}")
1619
+
1620
+ # After subset loop: add train (and global test, if you want) metrics
1621
+ logs.update({
1622
+ "train/loss": np.mean(losses[-(train_i+1):]),
1623
+ "train/lr": lrs[-1],
1624
+ "train/num_steps": len(losses),
1625
+ "train/fwd_pct_correct": fwd_percent_correct / (train_i + 1),
1626
+ "train/bwd_pct_correct": bwd_percent_correct / (train_i + 1),
1627
+ "train/loss_clip_total": loss_clip_total / (train_i + 1),
1628
+ "train/loss_blurry_total": loss_blurry_total / (train_i + 1),
1629
+ "train/loss_blurry_cont_total": loss_blurry_cont_total / (train_i + 1),
1630
+ "train/blurry_pixcorr": blurry_pixcorr / (train_i + 1),
1631
+ "train/recon_cossim": recon_cossim / (train_i + 1),
1632
+ "train/recon_mse": recon_mse / (train_i + 1),
1633
+ "train/loss_prior": loss_prior_total / (train_i + 1),
1634
+ })
1635
+
1636
+
1637
+ # if finished training, save jpg recons if they exist
1638
+ if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0):
1639
+ if blurry_recon:
1640
+ image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215
1641
+ # transform blurry recon latents to images and plot it
1642
+ fig, axes = plt.subplots(1, 8, figsize=(10, 4))
1643
+ jj=-1
1644
+ for j in [0,1,2,3]:
1645
+ jj+=1
1646
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1647
+ axes[jj].axis('off')
1648
+ jj+=1
1649
+ axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))
1650
+ axes[jj].axis('off')
1651
+ plt.show()
1652
+
1653
+ progress_bar.set_postfix(**logs)
1654
+
1655
+ if wandb_log: wandb.log(logs)
1656
+
1657
+ # Save model checkpoint and reconstruct
1658
+ if (ckpt_saving) and (epoch % ckpt_interval == 0):
1659
+ save_ckpt('last')
1660
+
1661
+ # wait for other GPUs to catch up if needed
1662
+ accelerator.wait_for_everyone()
1663
+ torch.cuda.empty_cache()
1664
+
1665
+ print("\n===Finished!===\n")
1666
+ if ckpt_saving:
1667
+ save_ckpt('last')
1668
+
1669
+
1670
+ # In[48]:
1671
+
1672
+
1673
+ len(test_data)
1674
+
1675
+
1676
+ # In[49]:
1677
+
1678
+
1679
+ # # Track metrics here:
1680
+ # https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999
1681
+
1682
+
1683
+ # **To tell if the model is working I'm looking at test_bwd/fwd_pct_correct and seeing if that is doing better than chance (1/batch_size)**
1684
+
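+ # Quick reference for the chance level mentioned above (illustrative; assumes the test
+ # dataloaders built earlier use batch_size=24, so top-1 retrieval chance is 1/24):
+ # chance = 1 / 24
+ # print(f"chance top-1 retrieval accuracy ~= {chance:.3f}; fwd/bwd pct_correct should exceed this")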
1685
+ # In[50]:
1686
+
1687
+
1688
+ # MST_pairmate_names
1689
+
1690
+
1691
+ # In[51]:
1692
+
1693
+
1694
+ x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')]
1695
+ assert len(image_idx) == len(x)
1696
+ pairs = np.empty(shape=MST_pairmate_names.shape, dtype=int)
1697
+ for i, p in enumerate(MST_pairmate_names):
1698
+ assert p[0] != p[1] # no duplicate images
1699
+ pairs[i,0] = x.index(p[0])
1700
+ pairs[i,1] = x.index(p[1])
1701
+
1702
+ # print(pairs)
1703
+
1704
+
1705
+ # In[52]:
1706
+
1707
+
1708
+ # if sub=="sub-002":
1709
+ # unique_images_pairs = [
1710
+ # (2,3),(4,5),(7,8),(15,16),
1711
+ # (483, 484), (485, 486), (487, 488), (491, 492), (495, 496), (499, 500), (501, 502),
1712
+ # (503, 504), (512, 513),
1713
+ # ]
1714
+ # elif sub != 'sub-001' and session != 'ses-05':
1715
+ # unique_images_pairs = [
1716
+ # (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),
1717
+ # (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),
1718
+ # (31,32),(33,34),(35,36),
1719
+ # (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),
1720
+ # (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),
1721
+ # (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),
1722
+ # (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),
1723
+ # (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),
1724
+ # (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),
1725
+ # (847, 848), (849, 850)
1726
+ # ]
1727
+ # else:
1728
+ # # unique_images = unique_images[unique_images!='blank.jpg'][:50]
1729
+ # unique_images_pairs = find_mst_pairs(x)
1730
+ # # unique_images[unique_images_pairs]
1731
+
1732
+
1733
+ # In[53]:
1734
+
1735
+
1736
+ def evaluate_mst_pairs(mst_pairs):
1737
+ score = 0
1738
+ total = 0
1739
+
1740
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1741
+ for pair in mst_pairs:
1742
+ voxel = vox[image_idx[pair[0]]].to(device)[None]
1743
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1744
+
1745
+ imageA = images[image_idx[pair[0]]].to(device)[None]
1746
+ imageB = images[image_idx[pair[1]]].to(device)[None]
1747
+
1748
+ clip_targetA = clip_img_embedder(imageA.float())
1749
+ clip_targetB = clip_img_embedder(imageB.float())
1750
+
1751
+ voxel_ridge = model.ridge(voxel,0)
1752
+ backbone, clip_voxels, _ = model.backbone(voxel_ridge)
1753
+
1754
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1755
+ clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
1756
+ clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
1757
+
1758
+ if utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm) > utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm):
1759
+ score += 1
1760
+ total += 1
1761
+
1762
+ voxel = vox[image_idx[pair[1]]].to(device)[None]
1763
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1764
+
1765
+ voxel_ridge = model.ridge(voxel,0)
1766
+ backbone, clip_voxels, _ = model.backbone(voxel_ridge)
1767
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1768
+
1769
+ if utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm) > utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm):
1770
+ score += 1
1771
+ total += 1
1772
+
1773
+ return score/total
1774
+
1775
+ print(evaluate_mst_pairs(pairs))
1776
+
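+ # To gauge whether this 2-AFC score beats chance (0.5), a binomial test can be run, mirroring
+ # the commented-out example further below (illustrative sketch; 2 judgments per pair as in the loop above):
+ # from scipy.stats import binomtest
+ # acc = evaluate_mst_pairs(pairs)
+ # n_trials = 2 * len(pairs)
+ # print(binomtest(round(acc * n_trials), n_trials, 0.5, alternative='greater').pvalue)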
1777
+
1778
+ # In[55]:
1779
+
1780
+
1781
+ model.eval()
1782
+ logs = {}
1783
+ if local_rank == 0:
1784
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1785
+ for i in range(1):
1786
+ for j in range(2):
1787
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1788
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1789
+ subset_dl = torch.utils.data.DataLoader(
1790
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1791
+ drop_last=True, pin_memory=True
1792
+ )
1793
+
1794
+ # Reset metrics for this subset
1795
+ test_fwd_percent_correct = 0
1796
+ test_bwd_percent_correct = 0
1797
+
1798
+ for test_i, behav in enumerate(subset_dl):
1799
+ behav = behav[0]
1800
+ loss = 0.
1801
+ image = images[behav.long().cpu()].to(device)
1802
+ voxel = vox[behav.long().cpu()]
1803
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1804
+ clip_img_embedder = clip_img_embedder.to(device)
1805
+ clip_target = clip_img_embedder(image.float())
1806
+
1807
+ voxel_ridge = model.ridge(voxel, 0)
1808
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1809
+
1810
+ clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1)
1811
+ clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1)
1812
+
1813
+ if clip_scale > 0:
1814
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1815
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1816
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1817
+ print(test_fwd_percent_correct)
1818
+ print(test_bwd_percent_correct)
1819
+ logs.update({
1820
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1821
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1822
+ })
1823
+
1824
+ print("--- Full Dataset Evaluation ---")
1825
+ for k, v in logs.items():
1826
+ print(f"{k}: {v:.4f}")
1827
+
1828
+
1829
+ # In[54]:
1830
+
1831
+
1832
+ # Compare each pair and its corresponding filenames
1834
+ for pair in pairs: # check every pair
1834
+ print("Indices in mst_pairs:", pair)
1835
+ print("Corresponding filenames:")
1836
+ print(f"Image 1: {x[pair[0]]}")
1837
+ print(f"Image 2: {x[pair[1]]}\n")
1838
+
1839
+
1840
+ # In[ ]:
1841
+
1842
+
1843
+ # for i in range(len(pairs)):
1844
+ # fig, ax = plt.subplots(1, 2, figsize=(10,8))
1845
+
1846
+ # ax[0].imshow(images[pairs[i][0]].permute(1,2,0).numpy())
1847
+ # ax[0].set_title(f"Repeat 1")
1848
+
1849
+ # ax[1].imshow(images[pairs[i][1]].permute(1,2,0).numpy())
1850
+ # ax[1].set_title(f"Repeat 2")
1851
+
1852
+ # plt.setp(ax, xticks=[], yticks=[])
1853
+ # plt.tight_layout()
1854
+ # plt.show()
1855
+
1856
+
1857
+ # In[ ]:
1858
+
1859
+
1860
+ # score = 0
1861
+ # total = 0
1862
+ # with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1863
+ # for pair in unique_images_pairs:
1864
+ # imageA_idx, imageB_idx = pair
1865
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
1866
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
1867
+
1868
+ # voxel = vox[imageA_idx].to(device)[None]
1869
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1870
+
1871
+ # imageA = images[imageA_idx].to(device)[None]
1872
+ # imageB = images[imageB_idx].to(device)[None]
1873
+
1874
+ # clip_targetA = clip_img_embedder(imageA.float())
1875
+ # clip_targetB = clip_img_embedder(imageB.float())
1876
+
1877
+ # voxel_ridge = model.ridge(voxel,0)
1878
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1879
+
1880
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1881
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
1882
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
1883
+
1884
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
1885
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
1886
+
1887
+ # if cossimA > cossimB:
1888
+ # score += 1
1889
+ # total += 1
1890
+
1891
+ # for pair in unique_images_pairs:
1892
+ # imageA_idx, imageB_idx = pair
1893
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
1894
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
1895
+
1896
+ # voxel = vox[imageB_idx].to(device)[None]
1897
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1898
+
1899
+ # imageA = images[imageA_idx].to(device)[None]
1900
+ # imageB = images[imageB_idx].to(device)[None]
1901
+
1902
+ # clip_targetA = clip_img_embedder(imageA.float())
1903
+ # clip_targetB = clip_img_embedder(imageB.float())
1904
+
1905
+ # voxel_ridge = model.ridge(voxel,0)
1906
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1907
+
1908
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1909
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
1910
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
1911
+
1912
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
1913
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
1914
+
1915
+ # if cossimB > cossimA:
1916
+ # score += 1
1917
+ # total += 1
1918
+
1919
+ # print(score/total)
1920
+
1921
+
1922
+ # In[ ]:
1923
+
1924
+
1925
+ #display(utils.torch_to_Image(imageA))
1926
+ #display(utils.torch_to_Image(imageB))
1927
+
1928
+
1929
+ # In[ ]:
1930
+
1931
+
1932
+ # from scipy.stats import binomtest
1933
+
1934
+ # total_samples = len(np.array(unique_images_pairs).flatten())
1935
+ # assert total_samples == 100
1936
+
1937
+ # correct_predictions = int((score/total) * total_samples) # calculate the number of correct predictions
1938
+ # expected_accuracy = 0.5 # expected accuracy under the null hypothesis
1939
+
1940
+ # # Perform the binomial test
1941
+ # binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')
1942
+ # p_value = binom_stats.pvalue
1943
+
1944
+ # # Output the result
1945
+ # print(f"P-value: {p_value}")
1946
+ # if p_value < 0.05:
1947
+ # print("The decoder's accuracy is significantly better than chance.")
1948
+ # else:
1949
+ # print("The decoder's accuracy is not significantly better than chance.")
1950
+
1951
+
1952
+ # In[ ]:
1953
+
1954
+
1955
+
1956
+
main-multisession-sub-005_ses-03_union_mask_sdxl_turbo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
main-multisession.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
main-test.py ADDED
@@ -0,0 +1,1962 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Import packages & functions
5
+
6
+ # In[1]:
7
+
8
+
9
+ print("importing modules")
10
+ import os
11
+ import sys
12
+ import json
13
+ import argparse
14
+ import numpy as np
15
+ import time
16
+ import random
17
+ import string
18
+ import h5py
19
+ from tqdm import tqdm
20
+ import webdataset as wds
21
+ from PIL import Image
22
+ import pandas as pd
23
+ import nibabel as nib
24
+ import nilearn
25
+
26
+ import matplotlib.pyplot as plt
27
+ import torch
28
+ import torch.nn as nn
29
+ from torchvision import transforms
30
+
31
+ # tf32 data type is faster than standard float32
32
+ torch.backends.cuda.matmul.allow_tf32 = True
33
+
34
+ import utils
35
+ from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas
36
+
37
+ # imports utils from mindeye_preproc as "preproc"
38
+ import importlib.util
39
+ parent_utils_path = "/home/ri4541/mindeye_preproc/analysis/utils.py"
40
+ spec = importlib.util.spec_from_file_location("utils", parent_utils_path)
41
+ preproc = importlib.util.module_from_spec(spec)
42
+ parent_dir = os.path.dirname(parent_utils_path)
43
+ if parent_dir not in sys.path:
44
+ sys.path.append(parent_dir)
45
+ spec.loader.exec_module(preproc)
46
+
47
+ if utils.is_interactive():
48
+ from IPython.display import clear_output # function to clear print outputs in cell
49
+ get_ipython().run_line_magic('load_ext', 'autoreload')
50
+ # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions
51
+ get_ipython().run_line_magic('autoreload', '2')
52
+
53
+ seed = utils.get_slurm_seed()
54
+
55
+
56
+ # # Princeton data prep
57
+
58
+ # ## Load Data & Design
59
+
60
+ # In[2]:
61
+
62
+
63
+ if utils.is_interactive():
64
+ sub = "sub-005"
65
+ session = "ses-03"
66
+ task = 'C' # e.g., 'C', 'study', or 'A'; used to search for the functional run in BIDS format
67
+ func_task_name = 'C' # e.g., 'C', 'study', or 'A'; used to search for the functional run in BIDS format
68
+ else:
69
+ sub = os.environ["sub"]
70
+ session = os.environ["session"]
71
+ task = os.environ["task"]
72
+
73
+ if session == "all":
74
+ ses_list = ["ses-01", "ses-02", "ses-03"] # list of actual session IDs
75
+ design_ses_list = ["ses-01", "ses-02", "ses-03"] # list of session IDs to search for design matrix
76
+ else:
77
+ ses_list = [session]
78
+ design_ses_list = [session]
79
+
80
+ task_name = f"_task-{task}" if task != 'study' else ''
81
+ resample_voxel_size = False
82
+ resample_post_glmsingle = False # do you want to do voxel resampling here? if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal
83
+ load_from_resampled_file = False # do you want to load resampled data from file? if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data
84
+
85
+ train_test_split = 'MST' # 'MST', 'orig', 'unique'
86
+ remove_close_to_MST = False
87
+ remove_random_n = False
88
+
89
+ if remove_close_to_MST or remove_random_n:
90
+ assert remove_close_to_MST != remove_random_n # don't remove both sets of images
91
+
92
+ n_to_remove = 0
93
+ if remove_random_n:
94
+ assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set
95
+ n_to_remove = 150
96
+
97
+ if resample_voxel_size:
98
+ # voxel size was unchanged in glmsingle, want to perform resampling here
99
+ resampled_vox_size = 2.5
100
+ resample_method = "sinc" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html
101
+
102
+ # file name helper variables
103
+ vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore
104
+ resampled_suffix = f"resampled_{vox_dim_str}mm_{resample_method}"
105
+ mask_resampled_suffix = resampled_suffix
106
+ if resample_post_glmsingle:
107
+ resampled_suffix += '_postglmsingle'
108
+ else:
109
+ resampled_suffix += '_preglmsingle'
110
+
111
+
112
+ # In[3]:
113
+
114
+
115
+ session_label = preproc.get_session_label(ses_list)
116
+ print('session label:', session_label)
117
+ n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)
118
+
119
+
120
+ # In[4]:
121
+
122
+
123
+ if utils.is_interactive():
124
+ glmsingle_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}"
125
+ else:
126
+ glmsingle_path = os.environ["glmsingle_path"]
127
+
128
+ designdir = "/home/ri4541/real_time_mindEye2"
129
+ print(glmsingle_path)
130
+
131
+ if resample_voxel_size:
132
+ # option 1: we are using original (non-resampled) GLMsingle outputs and doing the resampling here
133
+ # option 2: doing resampling pre-GLMsingle and using those outputs; no resampling involved here
134
+ if resample_post_glmsingle:
135
+ # option 1
136
+ orig_glmsingle_path = glmsingle_path
137
+ glmsingle_path += f"_{resampled_suffix}"
138
+ print("resampled glmsingle path:", glmsingle_path)
139
+ if load_from_resampled_file:
140
+ # resampling is already done; load from file
141
+ assert os.path.exists(glmsingle_path) # the new directory must have been created if we reached here
142
+ else:
143
+ # don't load from file; do resampling here
144
+ os.makedirs(glmsingle_path,exist_ok=True)
145
+ else:
146
+ # option 2
147
+ glmsingle_path += f"_{resampled_suffix}"
148
+ print("glmsingle path:", glmsingle_path)
149
+
150
+ assert os.path.exists(glmsingle_path)
151
+ print("glmsingle path exists!")
152
+
153
+
154
+ # In[ ]:
155
+
156
+
157
+ data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files(
158
+ sub=sub,
159
+ session=session,
160
+ func_task_name=task,
161
+ designdir=designdir,
162
+ design_ses_list=design_ses_list
163
+ )
164
+
165
+ if sub == 'sub-001':
166
+ if session == 'ses-01':
167
+ assert image_names[0] == 'images/image_686_seed_1.png'
168
+ elif session in ('ses-02', 'all'):
169
+ assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'
170
+ elif session == 'ses-03':
171
+ assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'
172
+ elif session == 'ses-04':
173
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
174
+ elif sub == 'sub-003':
175
+ assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'
176
+
177
+ unique_images = np.unique(image_names.astype(str))
178
+ unique_images = unique_images[(unique_images!="nan")]
179
+ len_unique_images = len(unique_images)
180
+ print("n_runs",n_runs)
181
+
182
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
183
+ assert len(unique_images) == 851
184
+
185
+ print(image_names[:4])
186
+ print(starts[:4])
187
+ print(is_new_run[:4])
188
+
189
+ if remove_random_n:
190
+ # want to remove 150 imgs
191
+ # 100 special515 imgs are repeated 3x (300 total)
192
+ # all other train imgs are only shown once (558 total)
193
+ # of the 150, want to sample proportionally since we're cutting all repeats for special515
194
+ # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total
195
+ np.random.seed(seed)
196
+ options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)
197
+ options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)
198
+ imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False)
199
+ imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))
200
+
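+ # Quick arithmetic check (sketch) of the proportional removal described above:
+ # 17 unique special515 images x 3 presentations = 51, plus 99 singly-shown images = 150.
+ if remove_random_n:
+     assert 99 + 17 * 3 == n_to_remove == 150
+     assert len(imgs_to_remove) == 99 + 17  # unique image names slated for removal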
201
+ image_idx = np.array([]) # contains the unique index of each presented image
202
+ vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx
203
+ all_MST_images = dict()
204
+ for i, im in enumerate(image_names):  # note: the manual i+=1 before each continue below is a no-op, since enumerate reassigns i every iteration
205
+ # skip if blank, nan
206
+ if im == "blank.jpg":
207
+ i+=1
208
+ continue
209
+ if str(im) == "nan":
210
+ i+=1
211
+ continue
212
+ vox_image_names = np.append(vox_image_names, im)
213
+ if remove_close_to_MST: # optionally skip close_to_MST images
214
+ if "closest_pairs" in im:
215
+ i+=1
216
+ continue
217
+ elif remove_random_n:
218
+ if im in imgs_to_remove:
219
+ i+=1
220
+ continue
221
+
222
+ image_idx_ = np.where(im==unique_images)[0].item()
223
+ image_idx = np.append(image_idx, image_idx_)
224
+
225
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles
226
+ import re
227
+ if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im) or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)):
228
+ # the regexp looks for <digits>_<digits>.png, allowing 1-2 digits before the underscore and 1-3 digits after it
229
+ # print(im)
230
+ all_MST_images[i] = im
231
+ i+=1
232
+ elif 'MST' in im:
233
+ all_MST_images[i] = im
234
+ i+=1
235
+
236
+ image_idx = torch.Tensor(image_idx).long()
237
+ # for im in new_image_names[MST_images]:
238
+ # assert 'MST_pairs' in im
239
+ # assert len(all_MST_images) == 300
240
+
241
+ unique_MST_images = np.unique(list(all_MST_images.values()))
242
+
243
+ MST_ID = np.array([], dtype=int)
244
+ if remove_close_to_MST:
245
+ close_to_MST_idx = np.array([], dtype=int)
246
+ if remove_random_n:
247
+ random_n_idx = np.array([], dtype=int)
248
+
249
+ vox_idx = np.array([], dtype=int)
250
+ j=0 # counter used only when remove_random_n is set; tracks position so vox can be indexed around the removed images
251
+ for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure
252
+ # skip if blank, nan
253
+ if im == "blank.jpg":
254
+ i+=1
255
+ continue
256
+ if str(im) == "nan":
257
+ i+=1
258
+ continue
259
+ if remove_close_to_MST: # optionally skip close_to_MST images
260
+ if "closest_pairs" in im:
261
+ close_to_MST_idx = np.append(close_to_MST_idx, i)
262
+ i+=1
263
+ continue
264
+ if remove_random_n:
265
+ if im in imgs_to_remove:
266
+ vox_idx = np.append(vox_idx, j)
267
+ i+=1
268
+ j+=1
269
+ continue
270
+ j+=1
271
+ curr = np.where(im == unique_MST_images)
272
+ # print(curr)
273
+ if curr[0].size == 0:
274
+ MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later
275
+ else:
276
+ MST_ID = np.append(MST_ID, curr)
277
+
278
+ assert len(MST_ID) == len(image_idx)
279
+ # assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)
280
+ # MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor
281
+ print(MST_ID.shape)
282
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
283
+ assert len(all_MST_images) == 100
284
+
285
+
286
+ # ## Load images
287
+
288
+ # In[ ]:
289
+
290
+
291
+ import imageio.v2 as imageio
292
+ resize_transform = transforms.Resize((224, 224))
293
+ MST_images = []
294
+ images = None
295
+ for im_name in tqdm(image_idx):
296
+ if sub == 'sub-001' and session == 'ses-01':
297
+ image_file = f"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}"
298
+ else:
299
+ image_file = f"{unique_images[im_name]}"
300
+ im = imageio.imread(image_file)
301
+ im = torch.Tensor(im / 255).permute(2,0,1)
302
+ im = resize_transform(im.unsqueeze(0))
303
+ if images is None:
304
+ images = im
305
+ else:
306
+ images = torch.vstack((images, im))
307
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
308
+ if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\d{1,2}_\d{1,3}\.png$', image_file)):
309
+ MST_images.append(True)
310
+ else:
311
+ MST_images.append(False)
312
+ else:
313
+ if ("MST_pairs" in image_file): # ("_seed_" not in unique_images[im_name]) and (unique_images[im_name] != "blank.jpg")
314
+ MST_images.append(True)
315
+ else:
316
+ MST_images.append(False)
317
+
318
+ print("images", images.shape)
319
+ MST_images = np.array(MST_images)
320
+ print("MST_images", len(MST_images))
321
+ if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
322
+ assert len(MST_images[MST_images==True]) == 100
323
+ print("MST_images==True", len(MST_images[MST_images==True]))
324
+
325
+
326
+ # In[ ]:
327
+
328
+
329
+ # want IDs of pairmates based on MST_images
330
+ # create "MST_pairmates" which is a 25x2 array with indices of the 25 pairs based on MST_images == True
331
+
332
+ assert unique_MST_images.shape[0] % 2 == 0 # make sure it's divisible by 2
333
+ MST_pairmate_names = unique_MST_images.reshape(int(unique_MST_images.shape[0]/2),2)
334
+ # print(MST_pairmate_names)
335
+
336
+ MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int)
337
+ for p, pair in enumerate(MST_pairmate_names):
338
+ for i, im in enumerate(pair):
339
+ MST_pairmate_indices[p][i] = np.where(np.isin(list(all_MST_images.values()), im))[0][0] # just take the first repeated instance of an image
340
+
341
+ print(MST_pairmate_indices.shape, MST_pairmate_indices)
342
+
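+ # Toy illustration (sketch, hypothetical file names) of the pairing logic above: after
+ # np.unique sorts the names, pairmates are assumed to sit next to each other, so a
+ # reshape to (n_pairs, 2) recovers the pairs.
+ _toy = np.array(['pair_01_a.png', 'pair_01_b.png', 'pair_02_a.png', 'pair_02_b.png'])
+ assert _toy.shape[0] % 2 == 0
+ print(_toy.reshape(-1, 2))  # [['pair_01_a.png' 'pair_01_b.png'] ['pair_02_a.png' 'pair_02_b.png']]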
343
+
344
+ # In[ ]:
345
+
346
+
347
+ if (sub == 'sub-001' and session in ('ses-02', 'ses-03', 'all')):
348
+ # MST_pairs contains the indices of repeats based on all_MST_images
349
+ # all_MST_images contains the indices of images from image_names
350
+ MST_pairs = utils.find_paired_indices(torch.tensor(MST_ID))
351
+ MST_pairs = np.array(sorted(MST_pairs[:-1], key=lambda x: x[0])) # we added a fake value as a placeholder so index out the last group of pairs
352
+
353
+ # assert images[MST_pairs]
354
+
355
+ fig, ax = plt.subplots(1, 3, figsize=(10,4))
356
+ fig.suptitle('Sample MST pairs')
357
+
358
+ ax[0].imshow(images[MST_pairs[-1][0]].permute(1,2,0).numpy())
359
+ ax[0].set_title(f"Trial 0")
360
+
361
+ ax[1].imshow(images[MST_pairs[-1][1]].permute(1,2,0).numpy())
362
+ ax[1].set_title(f"Trial 1")
363
+
364
+ ax[2].imshow(images[MST_pairs[-1][2]].permute(1,2,0).numpy())
365
+ ax[2].set_title(f"Trial 2")
366
+
367
+ plt.setp(ax, xticks=[], yticks=[])
368
+ plt.tight_layout()
369
+ plt.show()
370
+
371
+
372
+ # In[ ]:
373
+
374
+
375
+ # pairs has the indices of all repeated images
376
+ pairs = utils.find_paired_indices(image_idx)
377
+ pairs = sorted(pairs, key=lambda x: x[0])
378
+
379
+ fig, axes = plt.subplots(1, 3, figsize=(6, 2)) # 1 row, 3 columns
380
+ for i, ax in enumerate(axes):
381
+ ax.imshow(images[i].permute(1, 2, 0).numpy())
382
+ ax.set_title(f"Trial {i}")
383
+ ax.axis("off") # Hide axes for better visualization
384
+
385
+ plt.tight_layout()
386
+ # output_path = os.path.join(output_dir, "trials_plot.png")
387
+ # plt.savefig(output_path, dpi=300) # Save figure
388
+ plt.show()
389
+
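+ # For reference, a minimal sketch of what utils.find_paired_indices is assumed to do:
+ # group trial positions that share the same image index and keep only repeated images.
+ def find_paired_indices_sketch(idx):
+     groups = {}
+     for pos, val in enumerate(idx.tolist()):
+         groups.setdefault(val, []).append(pos)
+     return [v for v in groups.values() if len(v) > 1]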
390
+
391
+ # In[ ]:
392
+
393
+
394
+ p=0
395
+
396
+ # plot 2 repeats (anything in pairs should have 2 repeats, even if there's more)
397
+ fig, ax = plt.subplots(1, 2, figsize=(10,8))
398
+
399
+ ax[0].imshow(images[pairs[p][0]].permute(1,2,0).numpy())
400
+ ax[0].set_title(f"Repeat 1")
401
+
402
+ ax[1].imshow(images[pairs[p][1]].permute(1,2,0).numpy())
403
+ ax[1].set_title(f"Repeat 2")
404
+
405
+ plt.setp(ax, xticks=[], yticks=[])
406
+ plt.tight_layout()
407
+ plt.show()
408
+
409
+
410
+ # In[ ]:
411
+
412
+
413
+ if resample_voxel_size:
414
+ from nilearn.masking import apply_mask, unmask
415
+ ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz'
416
+ omat_name = f'{glmsingle_path}/boldref_omat'
417
+
418
+
419
+ # In[ ]:
420
+
421
+
422
+ def get_image_pairs(sub, session, func_task_name, designdir):
423
+ """Loads design files and processes image pairs for a given session."""
424
+ _, _, _, _, image_names, unique_images, _ = preproc.load_design_files(
425
+ sub=sub,
426
+ session=session,
427
+ func_task_name=func_task_name,
428
+ designdir=designdir,
429
+ design_ses_list=[session] # Ensure it's a list
430
+ )
431
+ return utils.process_images(image_names, unique_images)
432
+
433
+
434
+ # In[ ]:
435
+
436
+
437
+ from collections import defaultdict
438
+
439
+ all_dicts = []
440
+ for s_idx, s in enumerate(ses_list):
441
+ im, vo, _ = get_image_pairs(sub, s, func_task_name, designdir)
442
+ assert len(im) == len(vo)
443
+ all_dicts.append({k:v for k,v in enumerate(vo)})
444
+
445
+ assert session_label == 'ses-03'
446
+ image_to_indices = defaultdict(lambda: [[] for _ in range(len(ses_list))])
447
+ for ses_idx, idx_to_name in enumerate(all_dicts):
448
+ for idx, name in idx_to_name.items():
449
+ image_to_indices[name][ses_idx].append(idx)
450
+
451
+ image_to_indices = dict(image_to_indices)
452
+
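+ # Peek at the resulting structure (sketch): image_to_indices maps each image name
+ # to one list of trial indices per session in ses_list.
+ _example_key = next(iter(image_to_indices))
+ print(_example_key, "->", image_to_indices[_example_key])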
453
+
454
+ # In[5]:
455
+
456
+
457
+ from nilearn.plotting import plot_roi
458
+ assert sub == 'sub-005' and session == "ses-03"
459
+ print('loading brain mask')
460
+ # func_masks, avg_mask, nsd_masks, roi = utils.get_mask(['ses-01', 'ses-02', 'ses-03'], sub, func_task_name)
461
+ avg_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_brain.nii.gz')
462
+ final_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_mask.nii.gz')
463
+
464
+ # mask info
465
+ dimsize=avg_mask.header.get_zooms()
466
+ affine_mat = avg_mask.affine
467
+ brain=avg_mask.get_fdata()
468
+ xyz=brain.shape #xyz dimensionality of brain mask and epi data
469
+
470
+ print('Mask dimensions:', dimsize)
471
+ print('')
472
+ print('Affine:')
473
+ print(affine_mat)
474
+ print('')
475
+ print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n')
476
+
477
+ plot_roi(final_mask, bg_img=avg_mask)
478
+ plt.show()
479
+
480
+
481
+ # In[6]:
482
+
483
+
484
+ union_mask = np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_ses-01-02.npy')
485
+
486
+
487
+ # In[ ]:
488
+
489
+
490
+ # path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{s}_task-{func_task_name}/TYPED_FITHRF_GLMDENOISE_RR.npz"
491
+ # vox =
492
+ # ses_vox = []
493
+ # for i, s in enumerate([]):
494
+ # v = nilearn.masking.unmask(vox_list[i][:,0,0], func_masks[i])
495
+ # final_mask = nilearn.masking.intersect_masks([avg_mask, roi])
496
+ # ses_vox.append(nilearn.masking.apply_mask(v, final_mask))
497
+
498
+ # vox = np.concatenate(ses_vox)
499
+ # print('vox shape:', vox.shape)
500
+
501
+
502
+ # ## Load GLMSingle voxel data
503
+
504
+ # In[ ]:
505
+
506
+
507
+ vox = None
508
+ needs_postprocessing = False
509
+ params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx)
510
+
511
+ if resample_post_glmsingle == True:
512
+ glm_save_path_resampled = f"{glmsingle_path}/vox_resampled.nii.gz"
513
+ if load_from_resampled_file == True:
514
+ # resampling was done in this notebook so we can load from file
515
+ vox = nib.load(glm_save_path_resampled)
516
+ else:
517
+ # do resampling here
518
+ assert os.path.exists(ref_name) and os.path.exists(omat_name), "need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs"
519
+ vox = load_preprocess_betas(orig_glmsingle_path, *params)
520
+ vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name)
521
+ needs_postprocessing = True
522
+
523
+ if vox is None:
524
+ # either resampling was done in glmsingle or we aren't resampling
525
+ vox = load_preprocess_betas(glmsingle_path, *params)
526
+
527
+ if needs_postprocessing == True:
528
+ vox = nilearn.masking.apply_mask(vox, avg_mask)
529
+ vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images)
530
+ print(vox.shape)
531
+
532
+ assert len(vox) == len(image_idx)
533
+
534
+
535
+ # In[ ]:
536
+
537
+
538
+ ses_mask = nib.load(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{session_label}_task-C/sub-005_{session_label}_task-C_brain.nii.gz')
539
+ assert np.all(ses_mask.affine == final_mask.affine)
540
+ assert np.all(ses_mask.shape == final_mask.shape)
541
+
542
+
543
+ # In[ ]:
544
+
545
+
546
+ # get vox into the same shape as the union mask
547
+ v = nilearn.masking.unmask(vox, ses_mask) # move back to 3D based on own session mask
548
+ # final_mask = nilearn.masking.intersect_masks([avg_mask, roi])
549
+ vox = nilearn.masking.apply_mask(v, final_mask) # re-flatten based on final mask so everything is in the same shape now
550
+ print(vox.shape)
551
+ vox = vox[:, union_mask]
552
+ print("applied union roi mask")
553
+ print(vox.shape)
554
+
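+ # Sanity check (sketch): union_mask may be a boolean mask or an index array over
+ # the voxels inside final_mask; either way its size should match vox.shape[1] now.
+ _n_kept = int(union_mask.sum()) if union_mask.dtype == bool else len(union_mask)
+ assert vox.shape[1] == _n_kept, (vox.shape, _n_kept)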
555
+
556
+ # In[ ]:
557
+
558
+
559
+ pairs_homog = np.array([[p[0], p[1]] for p in pairs])
560
+
561
+
562
+ # In[ ]:
563
+
564
+
565
+ same_corrs = []
566
+ diff_corrs = []
567
+ for isamp, samp in enumerate(vox[pairs_homog]):
568
+ avg_same_img = []
569
+ for i in range(samp.shape[0]):
570
+ for j in range(i, samp.shape[0]):
571
+ if i != j:
572
+ avg_same_img.append(np.array([np.corrcoef(samp[i, :], samp[j, :])[0,1]]))
573
+
574
+ same_corrs.append(np.mean(avg_same_img))
575
+
576
+ avg_diff_img = []
577
+ for isamp_j, samp_j in enumerate(vox[pairs_homog]):
578
+ if isamp_j != isamp:
579
+ for i in range(samp_j.shape[0]):
580
+ for j in range(i, samp_j.shape[0]):
581
+ if i != j:
582
+ avg_diff_img.append(np.array([np.corrcoef(samp[i, :], samp_j[j, :])[0,1]]))
583
+
584
+ # print(len(avg_diff_img))
585
+ diff_corrs.append(np.mean(avg_diff_img))
586
+
587
+
588
+ print(len(same_corrs), len(diff_corrs))
589
+ same_corrs = np.array(same_corrs)
590
+ diff_corrs = np.array(diff_corrs)
591
+
592
+
593
+ plt.figure(figsize=(5,4))
594
+ plt.title(f"{sub}_{session} same/diff Pearson corr.")
595
+ plt.plot(np.sort(same_corrs),c='blue',label='same')
596
+ plt.plot(np.sort(diff_corrs),c='cyan',label='diff')
597
+ plt.axhline(0,c='k',ls='--')
598
+ plt.legend()
599
+ plt.xlabel("sample")
600
+ plt.ylabel("Pearson R")
601
+ plt.show()
602
+
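+ # Vectorized alternative (sketch): z-score each repeat pattern, then one matrix product
+ # gives every pairwise Pearson r at once. This summarizes the same same- vs.
+ # different-image comparison as the loops above, though not broken out per sample.
+ _reps = np.asarray(vox)[pairs_homog].reshape(-1, vox.shape[-1])   # (2*n_pairs, n_voxels)
+ _z = (_reps - _reps.mean(1, keepdims=True)) / _reps.std(1, keepdims=True)
+ _corr = _z @ _z.T / _z.shape[1]
+ _pair_id = np.repeat(np.arange(len(pairs_homog)), 2)
+ _same = (_pair_id[:, None] == _pair_id[None, :]) & ~np.eye(len(_reps), dtype=bool)
+ _diff = _pair_id[:, None] != _pair_id[None, :]
+ print("mean same-image r:", _corr[_same].mean(), "| mean diff-image r:", _corr[_diff].mean())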
603
+
604
+ # In[ ]:
605
+
606
+
607
+ vox_pairs = utils.zscore(vox[pairs_homog])
608
+ plt.figure(figsize=(5,4))
609
+ plt.title(f"{sub}_{session} same minus diff difference Pearson corr.")
610
+ plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference')
611
+ plt.axhline(0,c='k',ls='--')
612
+ plt.legend()
613
+ plt.xlabel("sample")
614
+ plt.ylabel("Pearson R")
615
+ plt.show()
616
+
617
+
618
+ # # Training MindEye
619
+
620
+ # In[ ]:
621
+
622
+
623
+ utils.seed_everything(seed)
624
+ MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k])
625
+ # train_image_indices = np.array([]).astype(np.int8)
626
+ # test_image_indices = np.concatenate([np.where(MST_images == True)[0], np.where(MST_images == False)[0]])
627
+ train_image_indices = np.where(MST_images == False)[0]
628
+ test_image_indices = np.where(MST_images == True)[0]
629
+ print(len(train_image_indices), len(test_image_indices))
630
+
631
+
632
+ # In[ ]:
633
+
634
+
635
+ train_mean = np.mean(vox[train_image_indices],axis=0)
636
+ train_std = np.std(vox[train_image_indices],axis=0)
637
+ # no train imgs so use train mean and std from main-multisession-3tasks with ses-01-02 multisession
638
+ # train_mean = -0.0318167
639
+ # train_std = 1.0120773
640
+
641
+ vox = utils.zscore(vox,train_mean=train_mean,train_std=train_std)
642
+ print("voxels have been zscored")
643
+ print(vox[:,0].mean(), vox[:,0].std())
644
+ print("vox", vox.shape)
645
+
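+ # For reference, a sketch of the normalization assumed to be applied by utils.zscore
+ # (the real implementation lives in utils.py): training-set statistics are used to
+ # standardize every trial, including the held-out MST trials.
+ def zscore_sketch(x, train_mean, train_std, eps=1e-8):
+     return (x - train_mean) / (train_std + eps)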
646
+
647
+ # In[ ]:
648
+
649
+
650
+ # for idx in deleted_indices:
651
+ # # check image names to be deleted match
652
+ # original_name = vox_image_dict[idx]
653
+ # matching_indices = [i for i in deleted_indices if vox_image_dict[i] == original_name]
654
+ # assert all(vox_image_dict[i] == original_name for i in matching_indices), \
655
+ # f"Mismatch in image names for deleted indices {matching_indices}"
656
+
657
+ # # check image data to be deleted match
658
+ # base_image = images[matching_indices[0]] # Reference image
659
+ # for i in matching_indices[1:]:
660
+ # assert np.array_equal(base_image, images[i]), \
661
+ # f"Mismatch in image data for {vox_image_dict[i]} at index {i}"
662
+
663
+ # images = images[kept_indices]
664
+
665
+
666
+ # In[ ]:
667
+
668
+
669
+ images = torch.Tensor(images)
670
+ vox = torch.Tensor(vox)
671
+ assert len(images) == len(vox)
672
+
673
+
674
+ # In[ ]:
675
+
676
+
677
+ ### Multi-GPU config ###
678
+ from accelerate import Accelerator, DeepSpeedPlugin
679
+
680
+ local_rank = os.getenv('RANK')
681
+ if local_rank is None:
682
+ local_rank = 0
683
+ else:
684
+ local_rank = int(local_rank)
685
+ print("LOCAL RANK ", local_rank)
686
+
687
+ data_type = torch.float32 # change depending on your mixed_precision
688
+
689
+ accelerator = Accelerator(split_batches=False)
690
+ batch_size = 8
691
+
692
+
693
+ # In[ ]:
694
+
695
+
696
+ print("PID of this process =",os.getpid())
697
+ device = accelerator.device
698
+ print("device:",device)
699
+ world_size = accelerator.state.num_processes
700
+ distributed = accelerator.state.distributed_type != 'NO'
701
+ num_devices = torch.cuda.device_count()
702
+ global_batch_size = batch_size * num_devices
703
+ print("global_batch_size", global_batch_size)
704
+ if num_devices==0 or not distributed: num_devices = 1
705
+ num_workers = num_devices
706
+ print(accelerator.state)
707
+
708
+ # set data_type to match your mixed precision (automatically set based on deepspeed config)
709
+ if accelerator.mixed_precision == "bf16":
710
+ data_type = torch.bfloat16
711
+ elif accelerator.mixed_precision == "fp16":
712
+ data_type = torch.float16
713
+ else:
714
+ data_type = torch.float32
715
+
716
+ print("distributed =",distributed, "num_devices =", num_devices, "local rank =", local_rank, "world size =", world_size, "data_type =", data_type)
717
+ print = accelerator.print # only print if local_rank=0
718
+
719
+
720
+ # ## Configurations
721
+
722
+ # In[ ]:
723
+
724
+
725
+ # if running this interactively, can specify jupyter_args here for argparser to use
726
+ if utils.is_interactive():
727
+ model_name = 'testing_MST' # 'sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0'
728
+ print("model_name:", model_name)
729
+
730
+ # global_batch_size and batch_size should already be defined in the above cells
731
+ # other variables can be specified in the following string:
732
+ # jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name={model_name}"
733
+
734
+ jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \
735
+ --model_name={model_name} \
736
+ --no-multi_subject --subj=1 --batch_size={batch_size} \
737
+ --hidden_dim=1024 --clip_scale=1. \
738
+ --no-blurry_recon --blur_scale=.5 \
739
+ --use_prior --prior_scale=30 \
740
+ --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \
741
+ --ckpt_interval=999 --no-ckpt_saving --new_test \
742
+ --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep"
743
+ print(jupyter_args)
744
+ jupyter_args = jupyter_args.split()
745
+
746
+
747
+ # In[ ]:
748
+
749
+
750
+ parser = argparse.ArgumentParser(description="Model Training Configuration")
751
+ parser.add_argument(
752
+ "--model_name", type=str, default="testing",
753
+ help="name of model, used for ckpt saving and wandb logging (if enabled)",
754
+ )
755
+ parser.add_argument(
756
+ "--data_path", type=str, default="/weka/proj-fmri/shared/natural-scenes-dataset",
757
+ help="Path to where NSD data is stored / where to download it to",
758
+ )
759
+ parser.add_argument(
760
+ "--subj",type=int, default=1, choices=[1,2,3,4,5,6,7,8],
761
+ help="Validate on which subject?",
762
+ )
763
+ parser.add_argument(
764
+ "--multisubject_ckpt", type=str, default=None,
765
+ help="Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.",
766
+ )
767
+ parser.add_argument(
768
+ "--num_sessions", type=int, default=0,
769
+ help="Number of training sessions to include (if multi_subject, this variable doesnt matter)",
770
+ )
771
+ parser.add_argument(
772
+ "--use_prior",action=argparse.BooleanOptionalAction,default=False,
773
+ help="whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)",
774
+ )
775
+ parser.add_argument(
776
+ "--batch_size", type=int, default=32,
777
+ help="Batch size can be increased by 10x if only training v2c and not diffusion diffuser",
778
+ )
779
+ parser.add_argument(
780
+ "--wandb_log",action=argparse.BooleanOptionalAction,default=False,
781
+ help="whether to log to wandb",
782
+ )
783
+ parser.add_argument(
784
+ "--resume_from_ckpt",action=argparse.BooleanOptionalAction,default=False,
785
+ help="if not using wandb and want to resume from a ckpt",
786
+ )
787
+ parser.add_argument(
788
+ "--wandb_project",type=str,default="stability",
789
+ help="wandb project name",
790
+ )
791
+ parser.add_argument(
792
+ "--mixup_pct",type=float,default=.33,
793
+ help="proportion of way through training when to switch from BiMixCo to SoftCLIP",
794
+ )
795
+ parser.add_argument(
796
+ "--low_mem",action=argparse.BooleanOptionalAction,default=False,
797
+ help="whether to preload images to cpu to speed things up but consume more memory",
798
+ )
799
+ parser.add_argument(
800
+ "--blurry_recon",action=argparse.BooleanOptionalAction,default=True,
801
+ help="whether to output blurry reconstructions",
802
+ )
803
+ parser.add_argument(
804
+ "--blur_scale",type=float,default=.5,
805
+ help="multiply loss from blurry recons by this number",
806
+ )
807
+ parser.add_argument(
808
+ "--clip_scale",type=float,default=1.,
809
+ help="multiply contrastive loss by this number",
810
+ )
811
+ parser.add_argument(
812
+ "--prior_scale",type=float,default=30,
813
+ help="multiply diffusion prior loss by this",
814
+ )
815
+ parser.add_argument(
816
+ "--use_image_aug",action=argparse.BooleanOptionalAction,default=True,
817
+ help="whether to use image augmentation",
818
+ )
819
+ parser.add_argument(
820
+ "--num_epochs",type=int,default=120,
821
+ help="number of epochs of training",
822
+ )
823
+ parser.add_argument(
824
+ "--multi_subject",action=argparse.BooleanOptionalAction,default=False,
825
+ )
826
+ parser.add_argument(
827
+ "--new_test",action=argparse.BooleanOptionalAction,default=True,
828
+ )
829
+ parser.add_argument(
830
+ "--n_blocks",type=int,default=2,
831
+ )
832
+ parser.add_argument(
833
+ "--hidden_dim",type=int,default=1024,
834
+ )
835
+ parser.add_argument(
836
+ "--seq_past",type=int,default=0,
837
+ )
838
+ parser.add_argument(
839
+ "--seq_future",type=int,default=0,
840
+ )
841
+ parser.add_argument(
842
+ "--lr_scheduler_type",type=str,default='cycle',choices=['cycle','linear'],
843
+ )
844
+ parser.add_argument(
845
+ "--ckpt_saving",action=argparse.BooleanOptionalAction,default=True,
846
+ )
847
+ parser.add_argument(
848
+ "--ckpt_interval",type=int,default=5,
849
+ help="save backup ckpt and reconstruct every x epochs",
850
+ )
851
+ parser.add_argument(
852
+ "--seed",type=int,default=42,
853
+ )
854
+ parser.add_argument(
855
+ "--max_lr",type=float,default=3e-4,
856
+ )
857
+
858
+ if utils.is_interactive():
859
+ args = parser.parse_args(jupyter_args)
860
+ else:
861
+ args = parser.parse_args()
862
+
863
+ # create global variables without the args prefix
864
+ for attribute_name in vars(args).keys():
865
+ globals()[attribute_name] = getattr(args, attribute_name)
866
+
867
+ outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')
868
+ if not os.path.exists(outdir) and ckpt_saving:
869
+ os.makedirs(outdir,exist_ok=True)
870
+
871
+ if use_image_aug or blurry_recon:
872
+ import kornia
873
+ import kornia.augmentation as K
874
+ from kornia.augmentation.container import AugmentationSequential
875
+ if use_image_aug:
876
+ img_augment = AugmentationSequential(
877
+ kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),
878
+ same_on_batch=False,
879
+ data_keys=["input"],
880
+ )
881
+ # Define the blurring augmentations
882
+ blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)
883
+
884
+ if multi_subject:
885
+ subj_list = np.arange(1,9)
886
+ subj_list = subj_list[subj_list != subj]
887
+ else:
888
+ subj_list = [subj]
889
+
890
+ print("subj_list", subj_list, "num_sessions", num_sessions)
891
+
892
+
893
+ # ## Prep data, models, and dataloaders
894
+
895
+ # In[ ]:
896
+
897
+
898
+ if ckpt_saving:
899
+ # save MST_ID for 2-alternative forced-choice retrieval evaluation
900
+ if 'MST' in model_name:
901
+ eval_dir = os.environ["eval_dir"]
902
+ print('saving MST info in', eval_dir)
903
+ # Saving ##
904
+ if not os.path.exists(eval_dir):
905
+ os.mkdir(eval_dir)
906
+
907
+ np.save(f"{eval_dir}/MST_ID.npy", MST_ID)
908
+ np.save(f"{eval_dir}/MST_pairmate_indices.npy", MST_pairmate_indices)
909
+
910
+ if remove_random_n:
911
+ np.save(f"{eval_dir}/imgs_to_remove.npy", imgs_to_remove)
912
+
913
+ np.save(f"{eval_dir}/train_image_indices.npy", train_image_indices)
914
+ np.save(f"{eval_dir}/test_image_indices.npy", test_image_indices)
915
+ np.save(f"{eval_dir}/images.npy", images)
916
+ np.save(f"{eval_dir}/vox.npy", vox)
917
+
918
+
919
+ # ### Creating wds dataloader, preload betas and all 73k possible images
920
+
921
+ # In[ ]:
922
+
923
+
924
+ def my_split_by_node(urls): return urls
925
+ num_voxels_list = []
926
+
927
+ if multi_subject:
928
+ nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])
929
+ num_samples_per_epoch = (750*40) // num_devices
930
+ else:
931
+ # num_samples_per_epoch = (750*num_sessions) // num_devices
932
+ num_samples_per_epoch = len(train_image_indices)
933
+
934
+ print("dividing batch size by subj_list, which will then be concatenated across subj during training...")
935
+ batch_size = batch_size // len(subj_list)
936
+
937
+ num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))
938
+
939
+ print("batch_size =", batch_size, "num_iterations_per_epoch =",num_iterations_per_epoch, "num_samples_per_epoch =",num_samples_per_epoch)
940
+
941
+
942
+ # In[ ]:
943
+
944
+
945
+ train_data = {}
946
+ train_dl = {}
947
+
948
+ train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))
949
+ test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))
950
+
951
+
952
+ # In[ ]:
953
+
954
+
955
+ num_voxels = {}
956
+ voxels = {}
957
+ for s in subj_list:
958
+ print(f"Training with {num_sessions} sessions")
959
+ train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=len(train_data[f'subj0{s}']), shuffle=False, drop_last=True, pin_memory=True)  # full-batch loader; len(train_data) would be the dict size (1), not the number of trials
960
+ # train_dl = []
961
+
962
+ num_voxels_list.append(vox[0].shape[-1])
963
+ num_voxels[f'subj0{s}'] = vox[0].shape[-1]
964
+ voxels[f'subj0{s}'] = vox
965
+ print(f"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}")
966
+
967
+ print("Loaded all subj train dls and vox!\n")
968
+
969
+ # Validate only on one subject
970
+ if multi_subject:
971
+ subj = subj_list[0] # can't validate on the actual held-out subject, so pick the first in subj_list
972
+ test_dl = torch.utils.data.DataLoader(test_data, batch_size=len(test_data), shuffle=False, drop_last=True, pin_memory=True)
973
+
974
+ print(f"Loaded test dl for subj{subj}!\n")
975
+
976
+
977
+ # ## Load models
978
+
979
+ # ### CLIP image embeddings model
980
+
981
+ # In[ ]:
982
+
983
+
984
+ ## USING OpenCLIP ViT-bigG ###
985
+ sys.path.append('generative_models/')
986
+ import sgm
987
+ from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder
988
+ # from generative_models.sgm.models.diffusion import DiffusionEngine
989
+ # from omegaconf import OmegaConf
990
+
991
+ try:
992
+ print(clip_img_embedder)
993
+ except:
994
+ clip_img_embedder = FrozenOpenCLIPImageEmbedder(
995
+ arch="ViT-bigG-14",
996
+ version="laion2b_s39b_b160k",
997
+ output_tokens=True,
998
+ only_tokens=True,
999
+ )
1000
+ clip_img_embedder.to(device)
1001
+ clip_seq_dim = 256
1002
+ clip_emb_dim = 1664
1003
+
1004
+ # ## USING OPEN AI CLIP ViT-L ###
1005
+ # import clip
1006
+ # try:
1007
+ # print(clip_model)
1008
+ # except:
1009
+ # clip_model, preprocess = clip.load("ViT-L/14", device=device)
1010
+ # preprocess = transforms.Compose([
1011
+ # transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),
1012
+ # transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
1013
+ # std=[0.26862954, 0.26130258, 0.27577711]),
1014
+ # ])
1015
+ # def clip_img_embedder(image):
1016
+ # preproc_img = preprocess(image)
1017
+ # return clip_model.encode_image(preproc_img)
1018
+ # clip_seq_dim = 1
1019
+ # clip_emb_dim = 768
1020
+
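+ # Shape sanity check (sketch): with output_tokens/only_tokens, the frozen OpenCLIP
+ # ViT-bigG embedder is expected to return token embeddings of shape (B, clip_seq_dim, clip_emb_dim).
+ with torch.no_grad():
+     _emb = clip_img_embedder(torch.zeros(1, 3, 224, 224, device=device))
+     print("clip embedding shape:", tuple(_emb.shape))  # expected (1, 256, 1664)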
1021
+
1022
+ # ### MindEye modules
1023
+
1024
+ # In[ ]:
1025
+
1026
+
1027
+ model = utils.prepare_model_and_training(
1028
+ num_voxels_list=num_voxels_list,
1029
+ n_blocks=n_blocks,
1030
+ hidden_dim=hidden_dim,
1031
+ clip_emb_dim=clip_emb_dim,
1032
+ clip_seq_dim=clip_seq_dim,
1033
+ use_prior=use_prior,
1034
+ clip_scale=clip_scale
1035
+ )
1036
+
1037
+
1038
+ # In[ ]:
1039
+
1040
+
1041
+ # test on subject 1 with fake data
1042
+ b = torch.randn((2,1,num_voxels_list[0]))
1043
+ print(b.shape, model.ridge(b,0).shape)
1044
+
1045
+
1046
+ # In[ ]:
1047
+
1048
+
1049
+ # test that the model works on some fake data
1050
+ b = torch.randn((2,1,hidden_dim))
1051
+ print("b.shape",b.shape)
1052
+
1053
+ backbone_, clip_, blur_ = model.backbone(b)
1054
+ print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)
1055
+
1056
+
1057
+ # ### Adding diffusion prior + unCLIP if use_prior=True
1058
+
1059
+ # In[ ]:
1060
+
1061
+
1062
+ if use_prior:
1063
+ from models import *
1064
+
1065
+ # setup diffusion prior network
1066
+ out_dim = clip_emb_dim
1067
+ depth = 6
1068
+ dim_head = 52
1069
+ heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim
1070
+ timesteps = 100
1071
+
1072
+ prior_network = VersatileDiffusionPriorNetwork(
1073
+ dim=out_dim,
1074
+ depth=depth,
1075
+ dim_head=dim_head,
1076
+ heads=heads,
1077
+ causal=False,
1078
+ num_tokens = clip_seq_dim,
1079
+ learned_query_mode="pos_emb"
1080
+ )
1081
+
1082
+ model.diffusion_prior = BrainDiffusionPrior(
1083
+ net=prior_network,
1084
+ image_embed_dim=out_dim,
1085
+ condition_on_text_encodings=False,
1086
+ timesteps=timesteps,
1087
+ cond_drop_prob=0.2,
1088
+ image_embed_scale=None,
1089
+ )
1090
+
1091
+ utils.count_params(model.diffusion_prior)
1092
+ utils.count_params(model)
1093
+
1094
+
1095
+ # ### Setup optimizer / lr / ckpt saving
1096
+
1097
+ # In[ ]:
1098
+
1099
+
1100
+ no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
1101
+
1102
+ opt_grouped_parameters = [
1103
+ {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},
1104
+ {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1105
+ {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
1106
+ ]
1107
+ # model.backbone.requires_grad_(False)
1108
+
1109
+ if use_prior:
1110
+ opt_grouped_parameters.extend([
1111
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
1112
+ {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
1113
+ ])
1114
+
1115
+ optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)
1116
+
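+ # Optional sanity check (sketch): biases and LayerNorm weights should land in the
+ # weight_decay=0.0 groups built above.
+ for _gi, _g in enumerate(optimizer.param_groups):
+     print(f"param group {_gi}: {len(_g['params'])} tensors, weight_decay={_g['weight_decay']}")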
1117
+ if lr_scheduler_type == 'linear':
1118
+ lr_scheduler = torch.optim.lr_scheduler.LinearLR(
1119
+ optimizer,
1120
+ total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),
1121
+ last_epoch=-1
1122
+ )
1123
+ elif lr_scheduler_type == 'cycle':
1124
+ if num_iterations_per_epoch==0:
1125
+ num_iterations_per_epoch=1
1126
+ total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))
1127
+ print("total_steps", total_steps)
1128
+ lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
1129
+ optimizer,
1130
+ max_lr=max_lr,
1131
+ total_steps=total_steps,
1132
+ final_div_factor=1000,
1133
+ last_epoch=-1, pct_start=2/num_epochs
1134
+ )
1135
+
1136
+ def save_ckpt(tag):
1137
+ ckpt_path = outdir+f'/{tag}.pth'
1138
+ if accelerator.is_main_process:
1139
+ unwrapped_model = accelerator.unwrap_model(model)
1140
+ torch.save({
1141
+ 'epoch': epoch,
1142
+ 'model_state_dict': unwrapped_model.state_dict(),
1143
+ 'optimizer_state_dict': optimizer.state_dict(),
1144
+ 'lr_scheduler': lr_scheduler.state_dict(),
1145
+ 'train_losses': losses,
1146
+ 'test_losses': test_losses,
1147
+ 'lrs': lrs,
1148
+ }, ckpt_path)
1149
+ print(f"\n---saved {outdir}/{tag} ckpt!---\n")
1150
+
1151
+ def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False):
1152
+ print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
1153
+ checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')  # load the requested tag rather than hard-coding last.pth
1154
+ state_dict = checkpoint['model_state_dict']
1155
+ if multisubj_loading: # remove incompatible ridge layer that will otherwise error
1156
+ state_dict.pop('ridge.linears.0.weight',None)
1157
+ model.load_state_dict(state_dict, strict=strict)
1158
+ if load_epoch:
1159
+ globals()["epoch"] = checkpoint['epoch']
1160
+ print("Epoch",epoch)
1161
+ if load_optimizer:
1162
+ optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
1163
+ if load_lr:
1164
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
1165
+ del checkpoint
1166
+
1167
+ print("\nDone with model preparations!")
1168
+ num_params = utils.count_params(model)
1169
+
1170
+
1171
+ # # Wandb
1172
+
1173
+ # In[ ]:
1174
+
1175
+
1176
+ # if local_rank==0 and wandb_log: # only use main process for wandb logging
1177
+ # import wandb
1178
+ # import time
1179
+
1180
+ # wandb_project = 'rtmindeye'
1181
+ # print(f"wandb {wandb_project} run {model_name}")
1182
+
1183
+ # # Need to configure wandb beforehand in terminal with "wandb init"!
1184
+ # wandb_config = {
1185
+ # "model_name": model_name,
1186
+ # "global_batch_size": global_batch_size,
1187
+ # "batch_size": batch_size,
1188
+ # "num_epochs": num_epochs,
1189
+ # "num_sessions": num_sessions,
1190
+ # "num_params": num_params,
1191
+ # "clip_scale": clip_scale,
1192
+ # "prior_scale": prior_scale,
1193
+ # "blur_scale": blur_scale,
1194
+ # "use_image_aug": use_image_aug,
1195
+ # "max_lr": max_lr,
1196
+ # "mixup_pct": mixup_pct,
1197
+ # "num_samples_per_epoch": num_samples_per_epoch,
1198
+ # "ckpt_interval": ckpt_interval,
1199
+ # "ckpt_saving": ckpt_saving,
1200
+ # "seed": seed, # SLURM array task ID
1201
+ # "distributed": distributed,
1202
+ # "num_devices": num_devices,
1203
+ # "world_size": world_size,
1204
+ # }
1205
+ # print("wandb_config:\n", wandb_config)
1206
+ # print("wandb_id:", model_name)
1207
+
1208
+ # # Initialize wandb
1209
+ # wandb.init(
1210
+ # id=model_name,
1211
+ # project=wandb_project,
1212
+ # name=model_name,
1213
+ # config=wandb_config,
1214
+ # resume="allow",
1215
+ # save_code=True,
1216
+ # )
1217
+
1218
+ # # Get SLURM job & array ID
1219
+ # slurm_job_id = utils.get_slurm_job()
1220
+ # slurm_array_id = seed # seed corresponds to SLURM_ARRAY_TASK_ID
1221
+
1222
+ # # Define SLURM log paths
1223
+ # log_dir = "slurms"
1224
+ # log_files = [
1225
+ # f"{log_dir}/{slurm_job_id}_{slurm_array_id}.out",
1226
+ # f"{log_dir}/{slurm_job_id}_{slurm_array_id}.err",
1227
+ # ]
1228
+
1229
+ # # Ensure logs exist before logging them
1230
+ # for log_file in log_files:
1231
+ # wait_time = 0
1232
+ # while not os.path.exists(log_file) and wait_time < 60: # Wait max 60s
1233
+ # time.sleep(5)
1234
+ # wait_time += 5
1235
+
1236
+ # # Log SLURM logs as artifacts
1237
+ # artifact = wandb.Artifact(f"slurm_logs_{slurm_job_id}_{slurm_array_id}", type="logs")
1238
+ # for log_file in log_files:
1239
+ # if os.path.exists(log_file):
1240
+ # artifact.add_file(log_file)
1241
+
1242
+ # wandb.log_artifact(artifact)
1243
+ # else:
1244
+ # wandb_log = False
1245
+
1246
+
1247
+ # # Train the model
1248
+
1249
+ # In[ ]:
1250
+
1251
+
1252
+ epoch = 0
1253
+ losses, test_losses, lrs = [], [], []
1254
+ best_test_loss = 1e9
1255
+ torch.cuda.empty_cache()
1256
+
1257
+
1258
+ # In[ ]:
1259
+
1260
+
1261
+ # # load multisubject stage1 ckpt if set
1262
+ # multisubject_ckpt = '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0'
1263
+ # # multisubject_ckpt = '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep'
1264
+ # if multisubject_ckpt is not None and not resume_from_ckpt:
1265
+ # load_ckpt("last",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)
1266
+
1267
+
1268
+ # In[ ]:
1269
+
1270
+
1271
+ # Load pretrained model ckpt
1272
+ tag='last'
1273
+ # outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')
1274
+ outdir = "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0"
1275
+ print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
1276
+ checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')
1277
+ state_dict = checkpoint['model_state_dict']
1278
+ model.load_state_dict(state_dict, strict=True)
1279
+ del checkpoint
1280
+ print("ckpt loaded!")
1281
+
1282
+
1283
+ # In[ ]:
1284
+
1285
+
1286
+ # checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')
1287
+ # state_dict = checkpoint['model_state_dict']
1288
+ # model.load_state_dict(state_dict, strict=False)
1289
+
1290
+
1291
+ # In[ ]:
1292
+
1293
+
1294
+ def freeze_model(model):
1295
+ for param in model.parameters():
1296
+ param.requires_grad = False
1297
+ return model
1298
+
1299
+ model = freeze_model(model)
1300
+
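+ # Quick check (sketch): everything should now be frozen, since this script only evaluates.
+ assert not any(p.requires_grad for p in model.parameters())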
1301
+
1302
+ # In[ ]:
1303
+
1304
+
1305
+ # train_dls = [train_dl[f'subj0{s}'] for s in subj_list]
1306
+
1307
+ model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)
1308
+ # leaving out test_dl since we will only have local_rank 0 device do evals
1309
+
1310
+
1311
+ # In[ ]:
1312
+
1313
+
1314
+ model.eval()
1315
+ logs = {}
1316
+ if local_rank == 0:
1317
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1318
+ for i in range(1):
1319
+ for j in range(2):
1320
+ subset_indices = MST_idx[:, i, j].reshape(-1)
1321
+ subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))
1322
+ subset_dl = torch.utils.data.DataLoader(
1323
+ subset_dataset, batch_size=len(MST_idx), shuffle=False,
1324
+ drop_last=True, pin_memory=True
1325
+ )
1326
+
1327
+ # Reset metrics for this subset
1328
+ test_fwd_percent_correct = 0
1329
+ test_bwd_percent_correct = 0
1330
+
1331
+ for test_i, behav in enumerate(subset_dl):
1332
+ behav = behav[0]
1333
+ loss = 0.
1334
+ image = images[behav.long().cpu()].to(device)
1335
+ voxel = vox[behav.long().cpu()]
1336
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1337
+ clip_img_embedder = clip_img_embedder.to(device)
1338
+ clip_target = clip_img_embedder(image.float())
1339
+
1340
+ voxel_ridge = model.ridge(voxel, 0)
1341
+ backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1342
+
1343
+ clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1)
1344
+ clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1)
1345
+
1346
+ if clip_scale > 0:
1347
+ labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)
1348
+ test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()
1349
+ test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()
1350
+ print(test_fwd_percent_correct)
1351
+ print(test_bwd_percent_correct)
1352
+ logs.update({
1353
+ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1),
1354
+ f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1),
1355
+ })
1356
+
1357
+ print("--- Full Dataset Evaluation ---")
1358
+ for k, v in logs.items():
1359
+ print(f"{k}: {v:.4f}")
1360
+
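+ # For reference, a minimal sketch of top-1 retrieval accuracy from a cosine-similarity
+ # matrix, which is what utils.topk(..., k=1) is assumed to measure above.
+ def top1_retrieval_acc(sim, labels):
+     # sim: (B, B) similarities; labels: ground-truth column index for each row
+     return (sim.argmax(dim=1) == labels).float().mean().item()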
1361
+
1362
+ # In[ ]:
1363
+
1364
+
1365
+ top_k = 5
1366
+
1367
+ for x in range(len(MST_idx)):
1368
+ # Get top-k indices
1369
+ y = torch.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm)[x], k=top_k).indices.to('cpu').tolist()
1370
+
1371
+ # Set up the plot with original + top_k images in one row
1372
+ fig, axs = plt.subplots(1, top_k + 1, figsize=(3 * (top_k + 1), 3))
1373
+
1374
+ # Plot the original image
1375
+ orig_img = utils.torch_to_Image(images[MST_idx[x]])
1376
+ axs[0].imshow(orig_img)
1377
+ axs[0].set_title("Original")
1378
+ axs[0].axis("off")
1379
+
1380
+ # Plot the top-k retrieved images
1381
+ for idx, i in enumerate(y):
1382
+ pred_img = utils.torch_to_Image(images[MST_idx[i]])
1383
+ axs[idx + 1].imshow(pred_img)
1384
+ axs[idx + 1].set_title(f"Top {idx+1}")
1385
+ axs[idx + 1].axis("off")
1386
+
1387
+ plt.tight_layout()
1388
+ plt.show()
1389
+
1390
+
1391
+ # In[ ]:
1392
+
1393
+
1394
+ clip_voxels_norm.shape, clip_target_norm.shape
1395
+
1396
+
1397
+ # In[ ]:
1398
+
1399
+
1400
+ len(test_data)
1401
+
1402
+
1403
+ # In[ ]:
1404
+
1405
+
1406
+ # # Track metrics here:
1407
+ # https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999
1408
+
1409
+
1410
+ # **To tell whether the model is working, check test_fwd/bwd_pct_correct and see whether it beats chance (1/batch_size).**
1411
+
1412
+ # In[ ]:
1413
+
1414
+
1415
+ # MST_pairmate_names
1416
+
1417
+
1418
+ # In[ ]:
1419
+
1420
+
1421
+ x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')]
1422
+ assert len(image_idx) == len(x)
1423
+ pairs = []
1424
+ for i, p in enumerate(MST_pairmate_names):
1425
+ assert p[0] != p[1] # no duplicate images
1426
+ pairs.append([utils.find_all_indices(x,p[0]), utils.find_all_indices(x,p[1])])
1427
+
1428
+ pairs = np.array(pairs)
1429
+ # print(pairs)
1430
+
1431
+
1432
+ # In[ ]:
1433
+
1434
+
1435
+ pairs[0][0][0]
1436
+
1437
+
1438
+ # In[ ]:
1439
+
1440
+
1441
+ pairs[0]
1442
+
1443
+
1444
+ # In[ ]:
1445
+
1446
+
1447
+ ix = 2
1448
+ x[pairs[ix][0][0]], x[pairs[ix][0][1]], x[pairs[ix][1][0]], x[pairs[ix][1][1]]
1449
+
1450
+
1451
+ # In[ ]:
1452
+
1453
+
1454
+ pairs.shape
1455
+
1456
+
1457
+ # In[ ]:
1458
+
1459
+
1460
+ # if sub=="sub-002":
1461
+ # unique_images_pairs = [
1462
+ # (2,3),(4,5),(7,8),(15,16),
1463
+ # (483, 484), (485, 486), (487, 488), (491, 492), (495, 496), (499, 500), (501, 502),
1464
+ # (503, 504), (512, 513),
1465
+ # ]
1466
+ # elif sub != 'sub-001' and session != 'ses-05':
1467
+ # unique_images_pairs = [
1468
+ # (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),
1469
+ # (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),
1470
+ # (31,32),(33,34),(35,36),
1471
+ # (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),
1472
+ # (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),
1473
+ # (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),
1474
+ # (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),
1475
+ # (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),
1476
+ # (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),
1477
+ # (847, 848), (849, 850)
1478
+ # ]
1479
+ # else:
1480
+ # # unique_images = unique_images[unique_images!='blank.jpg'][:50]
1481
+ # unique_images_pairs = find_mst_pairs(x)
1482
+ # # unique_images[unique_images_pairs]
1483
+
1484
+
1485
+ # In[ ]:
1486
+
1487
+
1488
+ def evaluate_mst_pairs(mst_pairs):
1489
+ score = 0
1490
+ total = 0
1491
+
1492
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1493
+ for pair in tqdm(mst_pairs):
1494
+ voxel = vox[image_idx[pair[0]]].to(device)[None]
1495
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1496
+
1497
+ imageA = images[image_idx[pair[0]]].to(device)[None]
1498
+ imageB = images[image_idx[pair[1]]].to(device)[None]
1499
+
1500
+ clip_targetA = clip_img_embedder(imageA.float())
1501
+ clip_targetB = clip_img_embedder(imageB.float())
1502
+
1503
+ voxel_ridge = model.ridge(voxel,0)
1504
+ backbone, clip_voxels, _ = model.backbone(voxel_ridge)
1505
+
1506
+ u = clip_voxels.flatten(1)[0]
1507
+ a = clip_targetA.flatten(1)[0]
1508
+ b = clip_targetB.flatten(1)[0]
1509
+
1510
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1511
+ clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
1512
+ clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
1513
+
1514
+ u_norm = clip_voxels_norm.flatten(1)[0]
1515
+ a_norm = clip_targetA_norm.flatten(1)[0]
1516
+ b_norm = clip_targetB_norm.flatten(1)[0]
1517
+
1518
+ assert not torch.allclose(u, u_norm)
1519
+ assert not torch.allclose(a, a_norm)
1520
+ assert not torch.allclose(b, b_norm)
1521
+
1522
+ if utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm) > utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm):
1523
+ score += 1
1524
+ total += 1
1525
+
1526
+ voxel = vox[image_idx[pair[1]]].to(device)[None]
1527
+ voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1528
+
1529
+ voxel_ridge = model.ridge(voxel,0)
1530
+ backbone, clip_voxels, _ = model.backbone(voxel_ridge)
1531
+ clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1532
+
1533
+ if utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm) > utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm):
1534
+ score += 1
1535
+ total += 1
1536
+
1537
+ sim_A = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
1538
+ sim_B = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
1539
+
1540
+ return score/total, score, total
1541
+
1542
+
1543
+ # In[ ]:
1544
+
1545
+
1546
+ def evaluate_mst_pairs(mst_pairs):
1547
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1548
+ failed_A = []
1549
+ failed_B = []
1550
+ failed_non_corr = []
1551
+
1552
+ # Get all unique image indices
1553
+ all_indices = np.unique(mst_pairs.flatten())
1554
+
1555
+ # Pre-load all images and betas to device
1556
+ all_images = images[image_idx[all_indices]].to(device)
1557
+ all_voxels = torch.Tensor(vox[image_idx[all_indices]]).unsqueeze(1).to(device)
1558
+
1559
+ # Get CLIP embeddings for all images
1560
+ all_clip_targets = clip_img_embedder(all_images.float())
1561
+ all_clip_targets_norm = nn.functional.normalize(all_clip_targets.flatten(1), dim=-1)
1562
+
1563
+ # Pass all betas through model to get MindEye embeddings
1564
+ all_voxel_ridge = model.ridge(all_voxels, 0)
1565
+ _, all_clip_voxels, _ = model.backbone(all_voxel_ridge)
1566
+ all_clip_voxels_norm = nn.functional.normalize(all_clip_voxels.flatten(1), dim=-1)
1567
+
1568
+ # Dict mapping idx (which indexes the "vox" and "images" tensors) to pos (their position in the flattened array "all_indices")
1569
+ idx_to_pos = {idx: pos for pos, idx in enumerate(all_indices)}
1570
+
1571
+ # Initialize scores
1572
+ corr_score = 0
1573
+ non_corr_score = 0
1574
+ corr_total = len(mst_pairs) * 2
1575
+ non_corr_total = len(mst_pairs) * (len(mst_pairs)-1) * 4 # n*(n-1) off-diagonal comparisons per similarity matrix, times the 4 matrices (AA, AB, BB, BA)
1576
+
1577
+
1578
+ # Pre-load voxelwise beta-based embeddings from MindEye and CLIP image embeddings
1579
+ idxA = np.array([pair[0] for pair in mst_pairs])
1580
+ idxB = np.array([pair[1] for pair in mst_pairs])
1581
+
1582
+ posA = np.array([idx_to_pos[idx] for idx in idxA])
1583
+ posB = np.array([idx_to_pos[idx] for idx in idxB])
1584
+
1585
+ voxA_embeddings = all_clip_voxels_norm[posA]
1586
+ voxB_embeddings = all_clip_voxels_norm[posB]
1587
+ imgA_embeddings = all_clip_targets_norm[posA]
1588
+ imgB_embeddings = all_clip_targets_norm[posB]
1589
+
1590
+ simA_A = utils.batchwise_cosine_similarity(voxA_embeddings, imgA_embeddings)
1591
+ simA_B = utils.batchwise_cosine_similarity(voxA_embeddings, imgB_embeddings)
1592
+ simB_B = utils.batchwise_cosine_similarity(voxB_embeddings, imgB_embeddings)
1593
+ simB_A = utils.batchwise_cosine_similarity(voxB_embeddings, imgA_embeddings)
1594
+
1595
+
1596
+ # corresponding 2-AFC
1597
+ # is the voxel embedding for image 1 pairmate A more similar to the CLIP embedding for image 1 pairmate A or the CLIP embedding for image 1 pairmate B?
1598
+ correct_A = torch.diag(simA_A) > torch.diag(simA_B)
1599
+ # is the voxel embedding for image 1 pairmate B more similar to the CLIP embedding for image 1 pairmate B or the CLIP embedding for image 1 pairmate A?
1600
+ correct_B = torch.diag(simB_B) > torch.diag(simB_A)
1601
+
1602
+ corr_score += correct_A.sum().item()
1603
+ corr_score += correct_B.sum().item()
1604
+
1605
+ # Store indices where AFC fails
1606
+ failed_A = [i for i, correct in enumerate(correct_A.cpu()) if not correct]
1607
+ failed_B = [i for i, correct in enumerate(correct_B.cpu()) if not correct]
1608
+
1609
+ # non-corresponding 2-AFC
1610
+ N = len(mst_pairs)
1611
+ # Create a mask that is True for all off-diagonal elements
1612
+ row_idx = torch.arange(N).unsqueeze(1) # (N, 1)
1613
+ col_idx = torch.arange(N).unsqueeze(0) # (1, N)
1614
+ off_diag_mask = row_idx != col_idx # shape (N, N)
1615
+
1616
+ diagA_A = simA_A.diag().unsqueeze(1).expand(-1, N) # Get diagonal values and expand to (N, N) by duplicating the diagonal element along the rows (since each row is the cosine similarity between a single voxel embedding and all CLIP embeddings)
1617
+ diagB_B = simB_B.diag().unsqueeze(1).expand(-1, N)
1618
+
1619
+ # pdb.set_trace()
1620
+
1621
+ # Compare each element in the row to the diagonal element
1622
+ off_diag_mask_device = off_diag_mask.to(device)
1623
+
1624
+ fail_AA = (simA_A < diagA_A) & off_diag_mask_device
1625
+ fail_AB = (simA_B < diagA_A) & off_diag_mask_device
1626
+ fail_BB = (simB_B < diagB_B) & off_diag_mask_device
1627
+ fail_BA = (simB_A < diagB_B) & off_diag_mask_device
1628
+
1629
+ non_corr_score += fail_AA.sum().item()
1630
+ non_corr_score += fail_AB.sum().item()
1631
+ non_corr_score += fail_BB.sum().item()
1632
+ non_corr_score += fail_BA.sum().item()
1633
+
1634
+ # Log failed indices
1635
+ fail_sources = [fail_AA, fail_AB, fail_BB, fail_BA]
1636
+ for fail_matrix, label in zip(fail_sources, ["AA", "AB", "BB", "BA"]):
1637
+ fail_coords = torch.nonzero(fail_matrix, as_tuple=False).cpu().numpy()
1638
+ for i, j in fail_coords:
1639
+ failed_non_corr.append({"type": label, "i": i, "j": j, "pair_i": mst_pairs[i], "pair_j": mst_pairs[j]})
1640
+
1641
+ return corr_score, corr_total, int(non_corr_score), non_corr_total, failed_A, failed_B, failed_non_corr
1642
+
1643
+
1644
+ # In[ ]:
1645
+
1646
+
1647
+ all_scores = []
1648
+ all_failures = []
1649
+
1650
+ for i in range(2):
1651
+ for j in range(2):
1652
+ mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) # shape (31, 2)
1653
+ corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs)
1654
+
1655
+ # Store scores and failure info together
1656
+ all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total))
1657
+ all_failures.append({
1658
+ "repeat_A": i,
1659
+ "repeat_B": j,
1660
+ "failed_A": failed_A,
1661
+ "failed_B": failed_B,
1662
+ "failed_non_corr": failed_non_corr,
1663
+ "mst_pairs": mst_pairs,
1664
+ })
1665
+
1666
+ # Print summary
1667
+ print(f"pairmate A repeat {i} vs pairmate B repeat {j}:")
1668
+ print(f"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})")
1669
+ print(f"2-AFC non-corresponding = {non_corr_score}/{non_corr_total} ({non_corr_score/non_corr_total:.2%})")
1670
+ print("")
1671
+
1672
+
1673
+ # In[ ]:
1674
+
1675
+
1676
+ # def generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=2):
1677
+ # n_imgs, n_pairmates, n_repeats = pairs.shape
1678
+ # nonmatch_pairs = []
1679
+
1680
+ # for i in range(n_imgs):
1681
+ # other_idxs = [j for j in range(n_imgs) if j != i]
1682
+ # sampled_j = np.random.choice(other_idxs, size=num_images_per_source, replace=False)
1683
+
1684
+ # for j in sampled_j:
1685
+ # for _ in range(num_repeats):
1686
+ # a_side = np.random.randint(2)
1687
+ # b_side = np.random.randint(2)
1688
+ # a_repeat = np.random.randint(n_repeats)
1689
+ # b_repeat = np.random.randint(n_repeats)
1690
+
1691
+ # pair_a = pairs[i, a_side, a_repeat]
1692
+ # pair_b = pairs[j, b_side, b_repeat]
1693
+ # nonmatch_pairs.append([pair_a, pair_b])
1694
+
1695
+ # return np.array(nonmatch_pairs)
1696
+
1697
+
1698
+ # In[ ]:
1699
+
1700
+
1701
+ # images[nonmatch_pairs[0]].shape, vox[
1702
+
1703
+
1704
+ # In[ ]:
1705
+
1706
+
1707
+ # nonmatch_pairs = generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=2)
1708
+ # results = evaluate_mst_pairs(nonmatch_pairs)
1709
+ # print(results[0])
1710
+ # nonmatch_score = results[1]
1711
+ # nonmatch_total = results[2]
1712
+
1713
+
1714
+ # In[ ]:
1715
+
1716
+
1717
+ # scores = []
1718
+ # totals = []
1719
+ # for i in range(pairs.shape[-1]):
1720
+ # for j in range(pairs.shape[-1]):
1721
+ # mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) # shape (31, 2)
1722
+ # results = evaluate_mst_pairs(mst_pairs)
1723
+ # scores.append(results[1])
1724
+ # totals.append(results[2])
1725
+ # print(f"pairmate A repeat {i} vs pairmate B repeat {j}: {results[0]}")
1726
+
1727
+
1728
+ # In[ ]:
1729
+
1730
+
1731
+ print(np.all(pairs[:, 0, 0] == pairs[:, 0, 1]))
1732
+ print(np.all(pairs[:, 1, 0] == pairs[:, 1, 1]))
1733
+
1734
+
1735
+ # In[ ]:
1736
+
1737
+
1738
+ print(np.unique(pairs[:, 0, :], axis=1).shape[-1])
1739
+ print(np.unique(pairs[:, 1, :], axis=1).shape[-1])
1740
+
1741
+
1742
+ # In[ ]:
1743
+
1744
+
1745
+ test1 = np.stack([pairs[:, 0, 0], pairs[:, 1, 0]], axis=1)
1746
+ test2 = np.stack([pairs[:, 0, 0], pairs[:, 1, 1]], axis=1)
1747
+ print(np.array_equal(test1, test2)) # Should be False
1748
+ # print(evaluate_mst_pairs(test1))
1749
+ # print(evaluate_mst_pairs(test2))
1750
+
1751
+
1752
+ # In[ ]:
1753
+
1754
+
1755
+ a = pairs[:, 0, 0]
1756
+ b = pairs[:, 1, 0]
1757
+ c = pairs[:, 1, 1]
1758
+
1759
+ print(np.mean(np.abs(b - c))) # should be > 0
1760
+ print(np.mean(np.abs(a - b))) # should also be > 0
1761
+ print(np.mean(np.abs(a - c))) # also > 0
1762
+
1763
+
1764
+ # In[ ]:
1765
+
1766
+
1767
+ ix = 5
1768
+ display(utils.torch_to_Image(images[mst_pairs[ix][0]]))
1769
+ display(utils.torch_to_Image(images[mst_pairs[ix][1]]))
1770
+
1771
+
1772
+ # In[ ]:
1773
+
1774
+
1775
+ subset_indices
1776
+
1777
+
1778
+ # In[ ]:
1779
+
1780
+
1781
+ MST_idx.shape
1782
+
1783
+
1784
+ # In[ ]:
1785
+
1786
+
1787
+ # utils.torch_to_Image(images[x[i]]) == utils.torch_to_Image(images[y[i]])
1788
+
1789
+
1790
+ # In[ ]:
1791
+
1792
+
1793
+ x = MST_idx[:, 0, 0].reshape(-1)
1794
+ y = MST_idx[:, 0, 1].reshape(-1)
1795
+ for i in range(len(x)):
1796
+ assert utils.torch_to_Image(images[x[i]]) == utils.torch_to_Image(images[y[i]])
1797
+ display(utils.torch_to_Image(images[x[i]]))
1798
+
1799
+
1800
+ # In[ ]:
1801
+
1802
+
1803
+ # # Compare first few pairs
1804
+ # for pair in pairs: # Checking first 2 pairs
1805
+ # print("Indices in mst_pairs:", pair)
1806
+ # print("Corresponding filenames:")
1807
+ # print(f"Image 1: {x[pair[0]]}")
1808
+ # print(f"Image 2: {x[pair[1]]}\n")
1809
+
1810
+
1811
+ # In[ ]:
1812
+
1813
+
1814
+ scores, totals
1815
+
1816
+
1817
+ # In[ ]:
1818
+
1819
+
1820
+ # from scipy.stats import binomtest
1821
+
1822
+ # total_samples = len(pairs.flatten())
1823
+ # assert total_samples == 124
1824
+
1825
+ # correct_predictions = int((np.mean(scores)/np.mean(totals)) * total_samples) # calculate the number of correct predictions
1826
+ # expected_accuracy = 0.5 # expected accuracy under the null hypothesis
1827
+
1828
+ # # Perform the binomial test
1829
+ # binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')
1830
+ # p_value = binom_stats.pvalue
1831
+
1832
+ # # Output the result
1833
+ # print(f"P-value: {p_value}")
1834
+ # if p_value < 0.05:
1835
+ # print("The decoder's accuracy is significantly better than chance.")
1836
+ # else:
1837
+ # print("The decoder's accuracy is not significantly better than chance.")
1838
+
1839
+
1840
+ # In[ ]:
1841
+
1842
+
1843
+ # from scipy.stats import binomtest
1844
+
1845
+ # total_samples = len(nonmatch_pairs.flatten())
1846
+ # # assert total_samples == 124
1847
+
1848
+ # correct_predictions = int((np.mean(nonmatch_score)/np.mean(nonmatch_total)) * total_samples) # calculate the number of correct predictions
1849
+ # expected_accuracy = 0.5 # expected accuracy under the null hypothesis
1850
+
1851
+ # # Perform the binomial test
1852
+ # binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')
1853
+ # p_value = binom_stats.pvalue
1854
+
1855
+ # # Output the result
1856
+ # print(f"P-value: {p_value}")
1857
+ # if p_value < 0.05:
1858
+ # print("The decoder's accuracy is significantly better than chance.")
1859
+ # else:
1860
+ # print("The decoder's accuracy is not significantly better than chance.")
1861
+
1862
+
1863
+ # In[ ]:
1864
+
1865
+
1866
+ # for i in range(len(pairs)):
1867
+ # fig, ax = plt.subplots(1, 2, figsize=(10,8))
1868
+
1869
+ # ax[0].imshow(images[pairs[i][0]].permute(1,2,0).numpy())
1870
+ # ax[0].set_title(f"Repeat 1")
1871
+
1872
+ # ax[1].imshow(images[pairs[i][1]].permute(1,2,0).numpy())
1873
+ # ax[1].set_title(f"Repeat 2")
1874
+
1875
+ # plt.setp(ax, xticks=[], yticks=[])
1876
+ # plt.tight_layout()
1877
+ # plt.show()
1878
+
1879
+
1880
+ # In[ ]:
1881
+
1882
+
1883
+ # score = 0
1884
+ # total = 0
1885
+ # with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
1886
+ # for pair in unique_images_pairs:
1887
+ # imageA_idx, imageB_idx = pair
1888
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
1889
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
1890
+
1891
+ # voxel = vox[imageA_idx].to(device)[None]
1892
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1893
+
1894
+ # imageA = images[imageA_idx].to(device)[None]
1895
+ # imageB = images[imageB_idx].to(device)[None]
1896
+
1897
+ # clip_targetA = clip_img_embedder(imageA.float())
1898
+ # clip_targetB = clip_img_embedder(imageB.float())
1899
+
1900
+ # voxel_ridge = model.ridge(voxel,0)
1901
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1902
+
1903
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1904
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
1905
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
1906
+
1907
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
1908
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
1909
+
1910
+ # if cossimA > cossimB:
1911
+ # score += 1
1912
+ # total += 1
1913
+
1914
+ # for pair in unique_images_pairs:
1915
+ # imageA_idx, imageB_idx = pair
1916
+ # imageA_idx = np.where(image_idx == imageA_idx)[0].item()
1917
+ # imageB_idx = np.where(image_idx == imageB_idx)[0].item()
1918
+
1919
+ # voxel = vox[imageB_idx].to(device)[None]
1920
+ # voxel = torch.Tensor(voxel).unsqueeze(1).to(device)
1921
+
1922
+ # imageA = images[imageA_idx].to(device)[None]
1923
+ # imageB = images[imageB_idx].to(device)[None]
1924
+
1925
+ # clip_targetA = clip_img_embedder(imageA.float())
1926
+ # clip_targetB = clip_img_embedder(imageB.float())
1927
+
1928
+ # voxel_ridge = model.ridge(voxel,0)
1929
+ # backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)
1930
+
1931
+ # clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
1932
+ # clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
1933
+ # clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)
1934
+
1935
+ # cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
1936
+ # cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)
1937
+
1938
+ # if cossimB > cossimA:
1939
+ # score += 1
1940
+ # total += 1
1941
+
1942
+ # print(score/total)
1943
+
1944
+
1945
+ # In[ ]:
1946
+
1947
+
1948
+ #display(utils.torch_to_Image(imageA))
1949
+ #display(utils.torch_to_Image(imageB))
1950
+
1951
+
1952
+ # In[ ]:
1953
+
1954
+
1955
+
1956
+
1957
+
1958
+ # In[ ]:
1959
+
1960
+
1961
+
1962
+
main.ipynb ADDED
@@ -0,0 +1,1950 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "b0f0f4f3",
6
+ "metadata": {},
7
+ "source": [
8
+ "# Import packages & functions"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 2,
14
+ "id": "5bad764b-45c1-45ce-a716-8d055e09821a",
15
+ "metadata": {
16
+ "tags": []
17
+ },
18
+ "outputs": [],
19
+ "source": [
20
+ "import os\n",
21
+ "import sys\n",
22
+ "import json\n",
23
+ "import argparse\n",
24
+ "import numpy as np\n",
25
+ "import time\n",
26
+ "import random\n",
27
+ "import string\n",
28
+ "import h5py\n",
29
+ "from tqdm import tqdm\n",
30
+ "import webdataset as wds\n",
31
+ "from PIL import Image\n",
32
+ "import pandas as pd\n",
33
+ "import nibabel as nib\n",
34
+ "\n",
35
+ "import matplotlib.pyplot as plt\n",
36
+ "import torch\n",
37
+ "import torch.nn as nn\n",
38
+ "from torchvision import transforms\n",
39
+ "\n",
40
+ "# tf32 data type is faster than standard float32\n",
41
+ "torch.backends.cuda.matmul.allow_tf32 = True\n",
42
+ "\n",
43
+ "# custom functions #\n",
44
+ "seed = 0\n",
45
+ "import utils\n",
46
+ "\n",
47
+ "if utils.is_interactive():\n",
48
+ " from IPython.display import clear_output # function to clear print outputs in cell\n",
49
+ " %load_ext autoreload \n",
50
+ " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n",
51
+ " %autoreload 2 "
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "markdown",
56
+ "id": "bae2b2ad-e1ef-4262-8263-6ae9a0766caa",
57
+ "metadata": {},
58
+ "source": [
59
+ "# Princeton data prep"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "markdown",
64
+ "id": "c6dbeabe-9e9c-4d8d-a8c3-414d79d14e63",
65
+ "metadata": {},
66
+ "source": [
67
+ "## Load Data & Design"
68
+ ]
69
+ },
70
+ {
71
+ "cell_type": "code",
72
+ "execution_count": 3,
73
+ "id": "0f2d14fc-bfe3-40dc-b14e-070812c43406",
74
+ "metadata": {},
75
+ "outputs": [],
76
+ "source": [
77
+ "sub = \"sub-001\"\n",
78
+ "session = \"ses-02\"\n",
79
+ "n_runs = 16\n",
80
+ "train_test_split = 'MST'\n",
81
+ "remove_close_to_MST = True # optionally skip close_to_MST images "
82
+ ]
83
+ },
84
+ {
85
+ "cell_type": "code",
86
+ "execution_count": 4,
87
+ "id": "34c1e0c6-0641-4239-8201-f2c676532302",
88
+ "metadata": {},
89
+ "outputs": [
90
+ {
91
+ "name": "stdout",
92
+ "output_type": "stream",
93
+ "text": [
94
+ "csv/sub-001_ses-02.csv\n",
95
+ "len_unique_images 708\n",
96
+ "n_runs 16\n",
97
+ "['all_stimuli/special515/special_40840.jpg'\n",
98
+ " 'all_stimuli/unchosen_nsd_1000_images/unchosen_5137_cocoid_57944.png'\n",
99
+ " 'all_stimuli/shared1000_notspecial/notspecial_38278.png'\n",
100
+ " 'all_stimuli/special515/special_30632.jpg']\n",
101
+ "[658.05201488 662.06546921 666.06833092 670.06900812]\n",
102
+ "[0. 0. 0. 0.]\n",
103
+ "torch.Size([933])\n"
104
+ ]
105
+ }
106
+ ],
107
+ "source": [
108
+ "filename = f\"csv/{sub}_{session}.csv\"\n",
109
+ "print(filename)\n",
110
+ "data = pd.read_csv(filename)\n",
111
+ "image_names = data['current_image'].values[14:]\n",
112
+ "starts = data['trial.started'].values[14:]\n",
113
+ "is_new_run = data['is_new_run'].values[14:]\n",
114
+ "\n",
115
+ "unique_images = np.unique(image_names.astype(str))\n",
116
+ "unique_images = unique_images[(unique_images!=\"nan\")]\n",
117
+ "unique_images = unique_images[(unique_images!=\"blank.jpg\")]\n",
118
+ "len_unique_images = len(unique_images)\n",
119
+ "print(\"len_unique_images\",len_unique_images)\n",
120
+ "print(\"n_runs\",n_runs)\n",
121
+ "\n",
122
+ "print(image_names[:4])\n",
123
+ "print(starts[:4])\n",
124
+ "print(is_new_run[:4])\n",
125
+ "\n",
126
+ "image_idx = np.array([])\n",
127
+ "for i in range(len(image_names)):\n",
128
+ " if image_names[i] == \"blank.jpg\":\n",
129
+ " continue\n",
130
+ " if str(image_names[i]) == \"nan\":\n",
131
+ " continue\n",
132
+ " if remove_close_to_MST: # optionally skip close_to_MST images \n",
133
+ " if \"closest_pairs\" in image_names[i]:\n",
134
+ " continue\n",
135
+ "\n",
136
+ "\n",
137
+ " image_idx_ = np.where(image_names[i]==unique_images)[0].item()\n",
138
+ " image_idx = np.append(image_idx, image_idx_)\n",
139
+ "image_idx = torch.Tensor(image_idx).long()\n",
140
+ "\n",
141
+ "all_MST_images = []\n",
142
+ "for im in image_names:\n",
143
+ " if im == \"blank.jpg\":\n",
144
+ " continue\n",
145
+ " if str(im) == \"nan\":\n",
146
+ " continue\n",
147
+ " if remove_close_to_MST: # optionally skip close_to_MST images \n",
148
+ " if \"closest_pairs\" in im:\n",
149
+ " continue\n",
150
+ "\n",
151
+ " # print(im)\n",
152
+ " if 'MST' in im:\n",
153
+ " all_MST_images.append(im)\n",
154
+ "assert len(all_MST_images) == 150\n",
155
+ "\n",
156
+ "unique_MST_images = np.unique(all_MST_images) \n",
157
+ "\n",
158
+ "MST_ID = np.array([], dtype=int)\n",
159
+ "\n",
160
+ "for i in range(len(image_names)):\n",
161
+ " if image_names[i] == \"blank.jpg\":\n",
162
+ " continue\n",
163
+ " if str(image_names[i]) == \"nan\":\n",
164
+ " continue\n",
165
+ " if remove_close_to_MST: # optionally skip close_to_MST images \n",
166
+ " if \"closest_pairs\" in image_names[i]:\n",
167
+ " continue\n",
168
+ " # print(image_names[i])\n",
169
+ " curr = np.where(image_names[i] == unique_MST_images)\n",
170
+ " if curr[0].size == 0:\n",
171
+ " MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later\n",
172
+ " else:\n",
173
+ " MST_ID = np.append(MST_ID, curr)\n",
174
+ "MST_ID = torch.Tensor(MST_ID)\n",
175
+ "# assert len(MST_ID) == len(images)\n",
176
+ "print(MST_ID.shape)"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "markdown",
181
+ "id": "e48ffe08-71ec-4a3f-9371-66fed2c21de4",
182
+ "metadata": {},
183
+ "source": [
184
+ "## Load images"
185
+ ]
186
+ },
187
+ {
188
+ "cell_type": "code",
189
+ "execution_count": null,
190
+ "id": "2ceb404f-b04f-42b6-afc4-283bb2b40c08",
191
+ "metadata": {},
192
+ "outputs": [
193
+ {
194
+ "name": "stderr",
195
+ "output_type": "stream",
196
+ "text": [
197
+ " 0%| | 0/933 [00:00<?, ?it/s]/tmp/ipykernel_983337/3793407494.py:7: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.\n",
198
+ " im = imageio.imread(f\"{unique_images[im_name]}\")\n",
199
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
200
+ " warnings.warn(\n",
201
+ " 81%|████████ | 752/933 [00:14<00:06, 28.50it/s] "
202
+ ]
203
+ }
204
+ ],
205
+ "source": [
206
+ "import imageio\n",
207
+ "resize_transform = transforms.Resize((224, 224))\n",
208
+ "MST_images = []\n",
209
+ "images = None\n",
210
+ "for im_name in tqdm(image_idx):\n",
211
+ " # im = imageio.imread(f\"rtmindeye_stimuli/{(unique_images[im_name]).split('/')[-1]}\")\n",
212
+ " im = imageio.imread(f\"{unique_images[im_name]}\")\n",
213
+ " im = torch.Tensor(im / 255).permute(2,0,1)\n",
214
+ " im = resize_transform(im.unsqueeze(0))\n",
215
+ " if images is None:\n",
216
+ " images = im\n",
217
+ " else:\n",
218
+ " images = torch.vstack((images, im))\n",
219
+ " if (\"MST_pairs\" in unique_images[im_name]): # (\"_seed_\" not in unique_images[im_name]) and (unique_images[im_name] != \"blank.jpg\") \n",
220
+ " MST_images.append(True)\n",
221
+ " else:\n",
222
+ " MST_images.append(False)\n",
223
+ "\n",
224
+ "print(\"images\", images.shape)\n",
225
+ "MST_images = np.array(MST_images)\n",
226
+ "print(\"MST_images\", len(MST_images))\n",
227
+ "print(\"MST_images==True\", len(MST_images[MST_images==True]))"
228
+ ]
229
+ },
230
+ {
231
+ "cell_type": "code",
232
+ "execution_count": null,
233
+ "id": "dbb17afa-579e-40ee-9cd3-3083ad31b94c",
234
+ "metadata": {},
235
+ "outputs": [],
236
+ "source": [
237
+ "# pairs is a matrix of the trial numbers where the same image was repeated\n",
238
+ "pairs = utils.find_paired_indices(image_idx)\n",
239
+ "pairs = np.array(sorted(pairs, key=lambda x: x[0]))\n",
240
+ "\n",
241
+ "fig, ax = plt.subplots(1, 3, figsize=(10,8))\n",
242
+ "\n",
243
+ "ax[0].imshow(images[pairs[-1][0]].permute(1,2,0).numpy())\n",
244
+ "ax[0].set_title(f\"Trial 0\")\n",
245
+ "\n",
246
+ "ax[1].imshow(images[pairs[-1][1]].permute(1,2,0).numpy())\n",
247
+ "ax[1].set_title(f\"Trial 1\")\n",
248
+ "\n",
249
+ "ax[2].imshow(images[pairs[-1][2]].permute(1,2,0).numpy())\n",
250
+ "ax[2].set_title(f\"Trial 2\")\n",
251
+ "\n",
252
+ "plt.setp(ax, xticks=[], yticks=[])\n",
253
+ "plt.tight_layout()\n",
254
+ "plt.show()"
255
+ ]
256
+ },
257
+ {
258
+ "cell_type": "code",
259
+ "execution_count": null,
260
+ "id": "9ffc1697-09fd-4be2-b8b1-bf6d935e7cc6",
261
+ "metadata": {},
262
+ "outputs": [],
263
+ "source": [
264
+ "int(len(unique_MST_images)/2)"
265
+ ]
266
+ },
267
+ {
268
+ "cell_type": "code",
269
+ "execution_count": null,
270
+ "id": "d79c57ec-e4b6-464a-940f-4047f81ddd99",
271
+ "metadata": {},
272
+ "outputs": [],
273
+ "source": [
274
+ "MST_pairmate_names = unique_MST_images.reshape(int(len(unique_MST_images)/2),2)\n",
275
+ "print(MST_pairmate_names[:5])\n",
276
+ "\n",
277
+ "MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int)\n",
278
+ "for p, pair in enumerate(MST_pairmate_names):\n",
279
+ " for i, im in enumerate(pair):\n",
280
+ " MST_pairmate_indices[p][i] = np.where(np.array(all_MST_images) == im)[0][0] # just take the first repeated instance of an image\n",
281
+ " \n",
282
+ "# print(MST_pairmate_indices, MST_pairmate_indices.shape)"
283
+ ]
284
+ },
285
+ {
286
+ "cell_type": "markdown",
287
+ "id": "7804edab-5dc2-4499-8a91-91d77f78bd77",
288
+ "metadata": {},
289
+ "source": [
290
+ "## Load GLMSingle voxel data"
291
+ ]
292
+ },
293
+ {
294
+ "cell_type": "code",
295
+ "execution_count": 12,
296
+ "id": "3a5e1904-4944-4a5d-9b8d-d85f7cd161e4",
297
+ "metadata": {
298
+ "tags": []
299
+ },
300
+ "outputs": [
301
+ {
302
+ "name": "stdout",
303
+ "output_type": "stream",
304
+ "text": [
305
+ "vox (1008, 1, 1, 194410)\n",
306
+ "vox (1008, 194410)\n"
307
+ ]
308
+ }
309
+ ],
310
+ "source": [
311
+ "glmsingle = np.load(f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{session}/TYPED_FITHRF_GLMDENOISE_RR.npz\",allow_pickle=True) # moved this dir to scratch mindeyev2 src mindeye\n",
312
+ "\n",
313
+ "vox = glmsingle['betasmd'].T\n",
314
+ "print(\"vox\", vox.shape)\n",
315
+ "\n",
316
+ "if vox.ndim==4:\n",
317
+ " vox = vox[:,0,0]\n",
318
+ " print(\"vox\", vox.shape)\n",
319
+ " "
320
+ ]
321
+ },
322
+ {
323
+ "cell_type": "markdown",
324
+ "id": "e98f085f-d20d-4f9e-abc5-f823c90d7f49",
325
+ "metadata": {},
326
+ "source": [
327
+ "### Load nsdgeneral ROI"
328
+ ]
329
+ },
330
+ {
331
+ "cell_type": "code",
332
+ "execution_count": 13,
333
+ "id": "12ecfa73-dd47-47dd-afcb-b23651185ee8",
334
+ "metadata": {},
335
+ "outputs": [
336
+ {
337
+ "name": "stdout",
338
+ "output_type": "stream",
339
+ "text": [
340
+ "(78, 102, 78)\n"
341
+ ]
342
+ }
343
+ ],
344
+ "source": [
345
+ "avg_mask=nib.load(f'masks/{sub}_{session}_brain.nii.gz')\n",
346
+ "print(avg_mask.shape)"
347
+ ]
348
+ },
349
+ {
350
+ "cell_type": "code",
351
+ "execution_count": 14,
352
+ "id": "96be660e-55bb-4297-a824-73acb4e8a6e4",
353
+ "metadata": {},
354
+ "outputs": [
355
+ {
356
+ "name": "stdout",
357
+ "output_type": "stream",
358
+ "text": [
359
+ "[[ 1.79999995 0. 0. -71.30000305]\n",
360
+ " [ 0. 1.79999995 0. -78.40000153]\n",
361
+ " [ 0. 0. 1.79999995 -47.80000305]\n",
362
+ " [ 0. 0. 0. 1. ]]\n"
363
+ ]
364
+ }
365
+ ],
366
+ "source": [
367
+ "from nilearn.plotting import plot_roi, plot_anat, plot_epi\n",
368
+ "\n",
369
+ "avg_mask=nib.load(f'masks/{sub}_{session}_brain.nii.gz')\n",
370
+ "\n",
371
+ "# mask info\n",
372
+ "dimsize=avg_mask.header.get_zooms()\n",
373
+ "affine_mat = avg_mask.affine\n",
374
+ "brain=avg_mask.get_fdata()\n",
375
+ "xyz=brain.shape #xyz dimensionality of brain mask and epi data\n",
376
+ "print(affine_mat)"
377
+ ]
378
+ },
379
+ {
380
+ "cell_type": "code",
381
+ "execution_count": 15,
382
+ "id": "e846060a-d9c0-43d3-824b-08fa2b4b354e",
383
+ "metadata": {},
384
+ "outputs": [
385
+ {
386
+ "name": "stdout",
387
+ "output_type": "stream",
388
+ "text": [
389
+ "Mask dimensions: (1.8, 1.8, 1.8)\n",
390
+ "\n",
391
+ "Affine:\n",
392
+ "[[ 1.79999995 0. 0. -71.30000305]\n",
393
+ " [ 0. 1.79999995 0. -78.40000153]\n",
394
+ " [ 0. 0. 1.79999995 -47.80000305]\n",
395
+ " [ 0. 0. 0. 1. ]]\n",
396
+ "\n",
397
+ "There are 194410 voxels in the included brain mask\n",
398
+ "\n"
399
+ ]
400
+ },
401
+ {
402
+ "data": {
403
+ "text/plain": [
404
+ "<nilearn.plotting.displays._slicers.OrthoSlicer at 0x14e3a9783a90>"
405
+ ]
406
+ },
407
+ "execution_count": 15,
408
+ "metadata": {},
409
+ "output_type": "execute_result"
410
+ },
411
+ {
412
+ "data": {
413
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqgAAAFyCAYAAAA59SiIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAeuUlEQVR4nO3de4xU5f0/8M8SkEVRvK4ULWoMxbRo8YpWojVqtdQY0trytWBBNE1FFKO0VSnVtLG1pdaoJd4V8FItSLG1YCsmqBgRsV7aaqS2C3yRwIpKDdZVZPf7B7/Z3yCz7OzsXJ6Zeb0SknVm9pxnzpzBN5/PeZ7TEBHtAQAAiehV6QEAAEA2ARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSeld6AAAAXVm4cGFerxs1alS3Xt8dmW1TeiqoAAAkRUAFAOpKU1NTNDU1VXoY7ERDRLRXehAAAJ9WijZ9RHSE05aWlh5vS9u/NFRQAaAKjB8/Ptrb2+Poo4+u9FCoAZnzKfNny5YtsXbt2rj33ntj0KBBlR6eSVIAQOWVqlpaatnjrsZq6vTp06O5uTkaGxvj+OOPjwkTJsTIkSNj2LBh8dFHH1VsXAIqAECdWrRoUbz44osREXH33XfHxo0b48orr4yzzz475s6dW7FxafEDABAREc8880xERBx66KEVHYcKKgCwg5213LvTyq7W1n0hiv1eK3HJwMEHHxwREe+9917Z951NQAUAqFMDBgyIffbZJxobG2PEiBFxzTXXRGtrazz22GMVHZeACgBQp5588snt/ru5uTnGjRsXb731VoVGtI2ACgB1rrut6Xpq21dSruNc7Lb/pEmTYuXKlTFgwICYOHFinHTSSRWdvZ8hoAIA1Knly5d3zOJfsGBBLF26NB588MEYOnRofPDBBxUbl4AKAHVIFbQ6dfW59aTC2tbWFldddVUsWbIkJk+eHL/4xS8K3lZPWWYKAICIiHjqqafi+eefj8suuyz69u1bsXGooAJAFZk4cWKceeaZOzx+0003xebNmyswImrNjBkzYt68eTFhwoS4/fbbKzIGARUAqsikSZNyPj5r1qwuA6q2fu0rxq1X58+fH2+++WZMnTo17rzzzmhrayvW8PLWEBHtZd8rAFB2Auo2TU1NERHR0tJS4ZGUViUW+i8WARUAapxgur16CagZ1RhUTZICACApAioAAEnR4geAGqSt37l6a/FntLS0xIQJEyo9jLyooAIA1IFMMK8GKqgAUENUTrtWrxXUbKlPnFJBBQAgKQIqAABJcScpAKhy2vrUGhVUAACSIqACAJAULf4a0djYGBERra2tFR4J1cj5A1Bfsi8LSXFGvwpqDWhsbIylS5fG0qVLO4IG5Mv5A0BqVFABoEqZHEWtqngFdfz48dHe3h5HH310pYdClcucS5k/W7ZsibVr18a9994bgwYNqvTwAIA8qaBSc6ZPnx7Nzc3R2NgYxx9/fEyYMCFGjhwZw4YNi48++qjSwwMAuiCgUnMWLVoUL774YkRE3H333bFx48a48sor4+yzz465c+dWeHQAkJbMpSIpTZaqeIsfSu2ZZ56JiIhDDz20wiMBAPIhoFLzDj744IiIeO+99yo7EAAgL1r81JwBAwbEPvvsE42NjTFixIi45pprorW1NR577LFKDw0AyIOASs158sknt/vv5ubmGDduXLz11lsVGhEA0B0CKjVn0qRJsXLlyhgwYEBMnDgxTjrpJLP3AaCKCKjUnOXLl3fM4l+wYEEsXbo0HnzwwRg6dGh88MEHFR4dANAVk6SoaW1tbXHVVVfFAQccEJMnT670cAB6bOHChR1/oFYJqNS8p556Kp5//vm47LLLom/fvpUeDgDQhWRa/BMnTowzzzxzh8dvuumm2Lx5cwVGRC2ZMWNGzJs3LyZMmBC33357pYcDAOxEMgF10qRJOR+fNWuWgEqPzZ8/P958882YOnVq3HnnndHW1lbpIQEAnah4i3/27NnR0NDQ6R9LA5GvzLmUmSCVrb29PYYMGRJDhgwRTgEgcRUPqAAAkE1ABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASeld6QFQWgsXLqz0ELYzatSoSg+h5qX2mQNAd6mgAgCQFBXUHkixUjV//vxKD2Gnso+ZamrxFOtcLNb547MFoCdUUAEASIqACgBAUuqyxZ9iax4Kkeq5XIpxuWwAoLRSugxPBRUAgKQIqAAAbGfhwoUV7dLVVYs/1XZovcr1eVS6pUC6iv39da4BpEsFFQCApNRsBVW1tDiOOvyoom7vr3/7606fz3xuqludc24Xh4lcVBvffeqJCioAAEkRUAEASEpNtfi1P3qm2O38nkhpLbZUOL/TV6zPyDkPpKJS/z9WQQUAIClVX0FNraqUXYXsakJQCnpSNc31/kpRhVVNpd6YwAXUOxVUAACSIqACAJCUqm3xp9Daz9XOzm57Z55PrdVfrDZ8IZczVNslEJWWwnlObXAnLqCaqKACAJCUqq2gpipXdbIeqoaFVGVTrTADXTORC+pPOSctq6ACAJAUARUAgKRUVYs/tQkj+a4DmloLu7PxlPtOUoUcl8w5oBUItcdELiBDBRUAgKRUVQUVAOpJap1DKBcBtQfybYl39rpUW//lbvWzI/9Tgp7r6ffIJQJQOVr8AAAkpSoqqNVeTUqtUtqVXHfDAqg3JmVC50r9/VBBBQAgKQIqAABJqYoWfyX1ZF3Tamvt51LsiVPFOiblvN1auVT7pSwAUCwqqAAAJEUFtchqoWqaS7knTmXvo1aPaT6q4c5kUOvK3bHRTaGalOr7oYIKAEBSBFQAAJKixZ+DtT93ricTp3K17uv5eGvlAcCOVFABAEhKshXUSlaWupqEUs8Vv2LK9zjW44Qp5xikqZR3z9FRodoV8/uhggoAQFIEVAAAkpJsi7+UStEyrpfWc7aero1ayCSpWml9a+UBQOdUUAEASErVV1B7WlHL/H49VkCLqZLV0FJOWignk8YAYBsVVAAAkiKgAgCQlKpv8RdLZ+3VXC1UbVVS4RIVAGqRCioAAEkRUAEASErVtvhrZT1M6kcp1z41sx/Ko9pXC4F
yyP7/XaHfGRVUAACSUhUV1HJXS1WgKBedAADYkQoqAABJEVABAEhKVbT4C7mNJvXDeQFUq1JOnoRqpoIKAEBSqqKCqkJWXN25a1Z3ZW/D59a5ro5NT7oGlpwCoNqpoAIAkBQBFQCApCTb4tcern7a/dvkmgSRq/WefYyKdbwy29HqB6CaqKACAJCUZCuoFEeuSpxqWppKWXEuZOKUyVZQOpaXgp1TQQUAICkCKgAASUmuxa/t0X3Dhw2PdS3rOn7u1ZD73x1dra1ZyjZuOe4GtmDugo6fR39zdMn2Uyrlmkj26f20tbd1ef6YbAVAOamgAgCQFAEVAICkJNfir5R6aF0Wa0Z/sdu91kvdphqOQ763aAWAnlBBBQAgKXlXUBsbG0s5jh1kV2KGDxte1n1Xg+xj0tbelvPn7m7n5b+/vNPnP72Prj6XXNvLNb7O3kt3/c95/9Pxc7nP153JdZxyHZtsmfO/HOd+T84fACiFhohoz+eFK1asKPFQACBNLS0tERExatSoomzPijWV1dTUFBH//3OldAr9zmjxAwCQlLxb/CNHjizlODrMnz8/Isrf1u+q5dqVQsab7z672nZbe1usf3t9REQM3G9gp+ug5juGUhz7zH5Kse0Xlr8QERFjxo0p+raLId9zulyfxaf15PyJ6Pl3BwA+Le+A2traWspx7KC7/5OstFKOtzvb7tXQq8djSeW95Ku9fdtVKuU+R7urkPde7u9BMc4fAOgpy0z9P9nL5+S7VE5PlwLa2bJPlbqrULXvJ0WZ62/W/+/6HZ6rhqWlsllGCoCuFONabaUSAACSIqACAJAULf4ccrX7tcLTNfqboys9hIL5vKF+WFoK8qeCCgBAUpKroGYurM3+l2Ylq0wqXGlatmxZpYdQFNUyScrkKADKSQUVAICkCKgAACQluRY/1KqBnx3Y8XNmTVRtfQBqRTHWP81QQQUAICkCKgAASdHir1LZ7dfhw4ZXbiAANa6YbUsgPyqoAAAkJdkKava/WDMTSsjt5b+/HE1NTZUeRlnUyvqnmQlTqZ3bL//95UoPAQBUUAEASIuACgBAUpJt8WdLtR1abj1dl7Kr3095Tc5PG/3N0ZUeQlHkWhsVqBwToiANKqgAACSlISLaKz2IQtRqtanQKmlmklRLS0sxh1N02VXaQt5rvVQ3Fi5cuMNjmWOX67h1Vf3u6lhXy/kDhUrt745c33HKx995peFOUgAA1CwBFQCApFTFJKlcMpNLstsk1TTJJ6Lnk56qUT2+50Jk2iTZ53e+x84xrk+5Wmv12kZOrZ0PtaxU3zcVVAAAklK1FdSM7OS+s4kllaCSRbk412pfIVWKUlQ2cv09q2IJFJsKKgAASRFQAQBIStW3+LPlavf39O5Jmd/v6TqTAIVIrX2e2niqVa6JkLWkp2te17pcmcJx2p4KKgAASampCmohLN0DAJCWug+oAEBxKersqNrWaq80LX4AAJKiggqQsMwkGpOTalNXa3lT/fKtnJZ7Ylnqk79VUAEASIqACgBAUrT42Sltxcoq1vHXOgRSkmkvV7qNXCrlmhCV736yj3Mhlxzk2k6pqaACAJCUmq2gVsNdOopVHWtsbIylS5dGRMTXv/71aG1tLcp2qR07O9cKPX9S/m7VouzjrbMBdEcpKrql/ntIBRUAgKQIqAAAJKVmW/wZWmFQGiZwQXFVw6VpPeFOSsVRL8dRBRUAgKTUfAUVSFuxuxy1Wn3K5u5SVLtaXV6q1pXz7xwVVAAAkiKgAgCQFC1+oKaUogVVD5cNAKREBRUAgKSooAJ0wUQugPISUAGAksueuZ9Zy7PWZvOXco3Seln/NEOLHwCApKigApSZ9UvZmezzw+UgVNrAzw6syH5VUAEASIqACgBAUrT4AaAGFTKpplyTlmppclSxJi/V0jEpBhVUAACSooIKAERE7mpgLVf2hg8bHr0aetX0eyxUpSZHZaigAgCQFAEVAICkaPEDQKJSWBO11tr+w4cNj3Ut67Z7LN87W9X63ZyWLVvW8fOCuQsiImL0N0dXZCwqqAAAJEUFFQBqSL5VvuxqYa1XBrPfX1t7W16vK3eVONe+K/m5VKpymqGCCgBAUgRUAACSosUPAFUgM2Eqe7JUT1rAtd7Wz5bdrh8+bHhev1Pu41PJiWeZyVGVbutnU0EFACApKqgAQE3Ld5JUKspRvc1eUiqlymmGCioAAEkRUAEASIoWPwBUkVx3l6qnCU/5KtcxKcV6suVYBzXFtn42FVQAAJKiggoARdbY2FjW/VXDxJ9y6+yYZD9e7OOW+vaylfsczWhtbc3rdQIqABTZ0qVLy7q/dS3ryrq/pqamsu6vEPkck/Vvr+/xfrKPRU8+h1zHtJSfa7nP0Yxjjjkmr9dp8QMAkBQVVAAospEjR5Z1f/Pnz+/4Od87JfXEy39/uSjbyR5rZpu5HuvptrO1tbd1VE4H7jcwejUUVqvLNbbuHvvuvL9ifa4vLH8hIiLGjBtTlO2VStUF1N69e8crr7wSn//852Pq1Klxww03dDz3mc98Jn75y1/GscceG4MGDYqtW7fGypUrY+bMmTFnzpwKjhp65nOf+1x873vfixEjRsRRRx0VjY2NcfDBB8fq1asrPTQgh3yvsyuFQgNXJeQaa7HGn892ejX0KurxKuWxL9a229vbI6Ky52g+qi6gXnLJJTF48OCcz+27775x4IEHxrx582LNmjXRp0+fOP3002P27NkxdOjQmDZtWplHC8VxwgknxKWXXhqvvfZavP7663HkkUdWekgAUDINEdFe6UHka7/99ouVK1fGDTfcED/96U93qKB25g9/+EOccsopMWDAgGhrq72Zjo2NjR0XO48cOTL5fxXRfXvttVds2bIlNm/eHFdccUX86le/KloF1fkDtaUca6Nmr/3ZE90ZY7777Gqbbe1tHZOPBjUNqkjFuav3UsrPbuBnB5Zs28XUrU/ly1/+crS3t8fo0aN3eO7cc8+N9vb2OP7444s1th1cf/318cYbb8T999/frd9btWpV7LrrrrHLLruUaGTUu8bGxnj99dfj9ddf327pjr322ivWrVsXzz77bPTqVfhfgu+9915s3ry5GEMFgOR1q8W/ZMmSWLNmTYwdOzYWLFiw3XNjx46NN998M5YtWxa77LJL7L777nlt85133snrdccee2yMHz8+Ro4c2XH9RGcaGxtjt912i/79+8fJJ58c559/fjz33HMqQ5RMa2trjB8/Pp599tm47rrr4oorroiIiJkzZ8aAAQNiwoQJ0dbWVpLvBkC5ZVf4ilVNLWTf1STXcSrXe6mWqmm2bl+Dev/998fll18ee+yxR7z//vsRse3az6985Stx3XXXRcS2auqsWbPy2l5DQ0Ner7vlllvi4YcfjmXLlsVBBx2009dOmTIlrr/++o7/Xrx4cZx//vl57QcKtXz58vjlL38ZP/zhD+P3v/997L///nHuuefGlClT4p///GdElOa7AQC1ptsBdc6cOXH11VfHOeecE/fcc09ERIwZMyb69OnT0Xr/85//HKeddlrRBj
lhwoQ4/PDD45xzzsnr9b/97W9jxYoVsd9++8VZZ50V+++/f/Tr169o44HOXHvttXHWWWfF7Nmzo3///rFkyZK4+eabO54v9ncDAGpRtwPqG2+8EcuXL4+xY8d2BNSxY8fGc889F//6178iImL9+vWxfn337s6QaclnbN26NTZu3Bi77757/PznP48ZM2bE2rVr89rWmjVrYs2aNRER8dBDD8Xtt98eixcvjqFDh2rzU1JbtmyJiRMnxooVK+LDDz/coXJfyHcDoDtGjRoVERHr/7c8f9f0pN2f/fpc7e58n69k27+rMeZ6rtyXRVSjgpaZmjNnTtx0001xwAEHRN++feOEE06Iiy++uOP5xsbGGDBgQF7b2rBhQ0RETJ06Na699tqOx1etWhWHHHJITJ06NXbZZZd4+OGHO1r7Bx54YERsm4By0EEHxbp162LLli2d7mPevHnx3e9+N0466aT4y1/+0t23m7zW1taORaEF8Mo744wzIiKiX79+MWTIkFi1alXHc4V8N0rN+QNAagoKqA899FD8+te/jnPPPTf69esXH3/8cTz88MMdz48ZM6bb19nNmTNnu/vCfvjhhxERMXjw4Nh7773jtdde2+F3p02bFtOmTYvhw4fHK6+80uk+Mu39fINBNRIs0nD44YfHj3/847jnnnti+PDhcdddd8Xhhx/ecb12Id+NcnD+AMWwswpivlXD7lQXU5gw1dUYKjk5atmyZWXZTykUFFDfeeedWLRoUYwbNy4aGxvj8ccf327GcSHX2TU3N0dzc/MOj9988807rBjQ1NQUd9xxR9x7773x6KOPdvzevvvuGxs3btxhGxdccEG0tbXFX/+qpE7p9O7dO2bNmhXr1q2LKVOmxCGHHBIvvPBC3HjjjXHBBRdEhGtQASAfBd9Jas6cOfHII49ERMT06dO3e66Y19m99NJL8dJLL233WKbV/49//CMeffTRjsenTZsWJ554Yjz++OOxZs2a2HvvveMb3/hGHHfccXHzzTd3XCMLpfCjH/0ohg8fHqeeemps3rw5/va3v8VPfvKTuO6662LevHmxaNGigr8be+yxR1xyySUREXHiiSdGRMTkyZNj06ZNsWnTppg5c2ZR3wsAVFLBd5Lq06dPrF+/Pnr16hUDBw6Mjz76qMhD69xBBx0Uq1at2uFOUqeddlpceumlcdRRR8V+++0Xra2t8eqrr8Zdd90Vs2fPLtv4qD9HHnlkPP/883HrrbfGlClTOh7v1atXPPfcc3HAAQfEF77whfjPf/5T0PYz53wumeu1ATpTrglTO1PIxKBStMJTuJNUKVXjmqe5FFxBbWtri08++ST++Mc/ljWcRkSsXr065/V5ixcvjsWLF5d1LBCxrdKf605lbW1tMWLEiB5vv7NzHgBqUcH/bBg9enQ0NTXFnDlzijkeAADqXLdb/Mcdd1wcccQRMX369Ni4cWMcffTRJRoaAFBMCxcu7Pg5lbVDP63U4yp3i7/cs/hrpcXf7U/loosuiltvvTVaWlriO9/5TinGBABAHSt4khQAUL0qOXGqniqo5ZC93unob46u3ECKqPo/FQAAaoqACgBAUrT4AaDOpbROqhZ/99XKxKhs1f+pAABQU1RQAYCISKuSGuFOUl2pxcppRvV+KgAA1CQBFQCApGjxAwA7SKHdXwrZLf6WlpaIqOxdtbqrltv62VRQAQBISu9KDwAASE+mUpddSS3XUlBskz1hbNSoURUcSfmpoAIAkBQBFQCApJgkBQDkZeHChTs8Vm3t/lyTpDJSeS+Z1n69tfWzqaACQBU59thjY+bMmbFixYr4+OOPo71953WmpqamuO2222Lt2rXx4YcfRnNzc9x1111lGi0UxiQpAKgio0aNigsvvDBeffXV+Pe//x1Dhw7t9LUHHnhgPPvssxERcdttt8Vbb70VgwYNiuOOO65cw4WCaPEDQBVpamqK999/P1pbW+OWW26JyZMnR0NDQ87X/ulPf4rDDjssjj322Hj33XdLMp7stn8qLfKdSbXFX88z9nPR4geAIjvooIOivb290z890dLSEq2trV2+bujQoTFq1KiYMWNGvPvuu9G3b9/o3VvjlOrgTAWAInv77bdj3Lhx2z3Wp0+fuPHGG+Pjjz+OiIh+/frFrrvu2uW2tm7dGps2ber2GE477bSIiNiwYUMsXrw4Tj311Pjkk0/iiSeeiIsuuihWr17d7W3mkqvaVy13oRo+bHj0aui1XfWynFRNOyegAkCR/fe//40HHnhgu8d+85vfRP/+/eP000+PiIgf/OAHce2113a5rVWrVsUhhxzS7TEMGTIkIiLuuOOOeOGFF+Jb3/pWDB48OK655ppYvHhxHHHEEfHhhx92e7vVriEaYlDToI6fSZOACgAldt5558XFF18cl19+eSxZsiQiIubMmRNLly7t8ncLDZH9+/ePiIj169fH1772tY5LC9auXRsPPfRQfPvb34677767oG1Xs4aGBsG0CgioAFBCX/ziF+O2226LBx98MG688caOx5ubm6O5ublk+80E29/97nfbXfc6d+7cuO++++JLX/pSyQJq5japEdXT7o+InK3+QiZO5XvJgLZ+5wRUACiRPffcMx555JFYuXJlXHjhhds9t9tuu3VUOXdm69atsXHjxm7ve926bTPVN2zYsN3jbW1t8c4778Ree+3V7W1CuQioAFACDQ0N8cADD8See+4Zp5122g6t+qlTp5b0GtQXX3wxIiIOOOCA7R7v06dP7LvvvvH22293e5uFyK6mflquO1Nly7d6mV2xzPxOZ1XMnlQtuxpvsfdXzwRUACiBa665Js4444z46le/GqtWrdrh+VJfg7pkyZLYsGFDjB07Nn72s5/FRx99FBEREyZMiN69e8cTTzxR0HahHARUACiyYcOGxfTp0+Ppp5+OpqamGDt27HbPP/DAAwVfgzp48OA477zzIiLimGOOiYiIadOmRUTE6tWr4/7774+IiI8//ji+//3vx5w5c+Lpp5+O++67LwYPHhxTpkyJp59+OubPn9+Ttwgl5U5SAFBkJ598csds/Vw6u/NTT7e9ZMmSOOWUU7Z7bMyYMXHllVfGYYcdFps2bYq5c+fG1VdfHZs3by54DKWkjU6EgAoAJERAJUJABQAgMb0qPQAAAMgmoAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqAC
gBAUv4PhV94MdAkyuIAAAAASUVORK5CYII=",
414
+ "text/plain": [
415
+ "<Figure size 660x350 with 4 Axes>"
416
+ ]
417
+ },
418
+ "metadata": {},
419
+ "output_type": "display_data"
420
+ }
421
+ ],
422
+ "source": [
423
+ "from nilearn.plotting import plot_roi, plot_anat, plot_epi\n",
424
+ "\n",
425
+ "avg_mask=nib.load(f'masks/{sub}_{session}_brain.nii.gz')\n",
426
+ "\n",
427
+ "# mask info\n",
428
+ "dimsize=avg_mask.header.get_zooms()\n",
429
+ "affine_mat = avg_mask.affine\n",
430
+ "brain=avg_mask.get_fdata()\n",
431
+ "xyz=brain.shape #xyz dimensionality of brain mask and epi data\n",
432
+ "\n",
433
+ "print('Mask dimensions:', dimsize)\n",
434
+ "print('')\n",
435
+ "print('Affine:')\n",
436
+ "print(affine_mat)\n",
437
+ "print('')\n",
438
+ "print(f'There are {int(np.sum(brain))} voxels in the included brain mask\\n')\n",
439
+ "\n",
440
+ "roi = nib.load(f'masks/{sub}_nsdgeneral.nii.gz')\n",
441
+ "\n",
442
+ "plot_roi(roi, bg_img=avg_mask)"
443
+ ]
444
+ },
445
+ {
446
+ "cell_type": "code",
447
+ "execution_count": 16,
448
+ "id": "f7f2e9dd-88af-4ca9-bd80-17cbe429b6ce",
449
+ "metadata": {},
450
+ "outputs": [
451
+ {
452
+ "name": "stdout",
453
+ "output_type": "stream",
454
+ "text": [
455
+ "total voxels (whole brain) = 194410\n",
456
+ "nsdgeneral voxels = 25069\n"
457
+ ]
458
+ }
459
+ ],
460
+ "source": [
461
+ "avg_mask = avg_mask.get_fdata().flatten()\n",
462
+ "print(f\"total voxels (whole brain) = {int(avg_mask.sum())}\")\n",
463
+ "\n",
464
+ "roi = roi.get_fdata()\n",
465
+ "roi = roi.flatten()\n",
466
+ "roi = roi[avg_mask.astype(bool)]\n",
467
+ "roi[np.isnan(roi)] = 0\n",
468
+ "roi = roi.astype(bool)\n",
469
+ "print(f\"nsdgeneral voxels = {roi.sum()}\")"
470
+ ]
471
+ },
472
+ {
473
+ "cell_type": "markdown",
474
+ "id": "3bf38b1b-1270-4f65-bc01-07f90343963d",
475
+ "metadata": {},
476
+ "source": [
477
+ "### ROI voxel exclusion"
478
+ ]
479
+ },
480
+ {
481
+ "cell_type": "code",
482
+ "execution_count": 17,
483
+ "id": "de856487-6f6b-4971-8e84-8b30c9a9e943",
484
+ "metadata": {},
485
+ "outputs": [
486
+ {
487
+ "name": "stdout",
488
+ "output_type": "stream",
489
+ "text": [
490
+ "vox before ROI exclusion: (1008, 194410)\n",
491
+ "vox after ROI exclusion: (1008, 25069)\n"
492
+ ]
493
+ }
494
+ ],
495
+ "source": [
496
+ "# ROI masking?\n",
497
+ "print(f\"vox before ROI exclusion: {vox.shape}\")\n",
498
+ "vox = vox[:,roi]\n",
499
+ "print(f\"vox after ROI exclusion: {vox.shape}\")\n",
500
+ "\n",
501
+ "if np.any(np.isnan(vox)):\n",
502
+ " print(\"NaNs found! Removing voxels...\")\n",
503
+ " x,y = np.where(np.isnan(vox))\n",
504
+ " vox = vox[:,np.setdiff1d(np.arange(vox.shape[-1]), y)]"
505
+ ]
506
+ },
507
+ {
508
+ "cell_type": "markdown",
509
+ "id": "903c4f38-4f6e-44d7-8186-44c47c77507a",
510
+ "metadata": {},
511
+ "source": [
512
+ "## Reliability calculation"
513
+ ]
514
+ },
515
+ {
516
+ "cell_type": "markdown",
517
+ "id": "288e757f-fe51-4c3b-bb3c-e78b18c755c5",
518
+ "metadata": {},
519
+ "source": [
520
+ "### Calculate reliability (corr between first and second presentation of same image) for every voxel"
521
+ ]
522
+ },
523
+ {
524
+ "cell_type": "code",
525
+ "execution_count": null,
526
+ "id": "765ab07f-dbb3-4bdd-8de1-57fe85231d82",
527
+ "metadata": {},
528
+ "outputs": [],
529
+ "source": [
530
+ "vox_pairs = utils.zscore(vox[pairs])\n",
531
+ "rels = np.full(vox.shape[-1],np.nan)\n",
532
+ "for v in tqdm(range(vox.shape[-1])):\n",
533
+ " rels[v] = np.corrcoef(vox_pairs[:,0,v], vox_pairs[:,1,v])[1,0]\n",
534
+ "print(\"rels\", rels.shape)\n",
535
+ "assert np.sum(np.all(np.isnan(rels))) == 0"
536
+ ]
537
+ },
538
+ {
539
+ "cell_type": "markdown",
540
+ "id": "b646ecf4-dd03-4352-abcf-6c6ccb54d96b",
541
+ "metadata": {},
542
+ "source": [
543
+ "### Create representational similarity matrix"
544
+ ]
545
+ },
546
+ {
547
+ "cell_type": "code",
548
+ "execution_count": null,
549
+ "id": "81aebd28-a66b-43fd-8123-fbcd99851383",
550
+ "metadata": {},
551
+ "outputs": [],
552
+ "source": [
553
+ "# creating img x vox x repetitions matrix | shape=(150, 18419, 2)\n",
554
+ "vox0 = np.zeros((len(pairs), vox.shape[-1], 2))\n",
555
+ "for ipair, pair in enumerate(tqdm(pairs)):\n",
556
+ " pair = pair[:2] # to keep things consistent, just using the first two repeats\n",
557
+ " i,j = pair\n",
558
+ " vox0[ipair, :, :] = vox[pair].T\n",
559
+ "vox_avg = vox0.mean(-1) # average across the repetitions"
560
+ ]
561
+ },
562
+ {
563
+ "cell_type": "code",
564
+ "execution_count": null,
565
+ "id": "30fad9fc-5399-4820-8a8a-8b63fec5d371",
566
+ "metadata": {},
567
+ "outputs": [],
568
+ "source": [
569
+ "# Masking RDM for each reliability threshold\n",
570
+ "r_thresholds = np.array([.0, .1, .2, .3])\n",
571
+ "rdm = np.zeros((len(r_thresholds), len(pairs), len(pairs))) \n",
572
+ "for ir_thresh, r_thresh in enumerate(r_thresholds):\n",
573
+ " print(f\"reliability threshold = {r_thresh}\")\n",
574
+ " for i in tqdm(range(len(pairs))):\n",
575
+ " for j in range(len(pairs)):\n",
576
+ " rdm[ir_thresh,i,j] = np.corrcoef(vox_avg[i,rels>r_thresh], \n",
577
+ " vox_avg[j,rels>r_thresh])[0,1]\n",
578
+ "# rdm is shape (4, 150, 150)"
579
+ ]
580
+ },
581
+ {
582
+ "cell_type": "code",
583
+ "execution_count": null,
584
+ "id": "feac20e6-d50f-466f-98c9-eab730db65e9",
585
+ "metadata": {},
586
+ "outputs": [],
587
+ "source": [
588
+ "reliability_threshold_to_visualize = .1\n",
589
+ "plt.figure(figsize=(4,4))\n",
590
+ "plt.imshow(rdm[np.where(r_thresholds==reliability_threshold_to_visualize)[0].item()], clim=(-1,1))\n",
591
+ "plt.colorbar(shrink=0.8)\n",
592
+ "plt.title(f\"{sub}_{session}\\nreliability threshold={reliability_threshold_to_visualize}\\n\")\n",
593
+ "plt.show()"
594
+ ]
595
+ },
596
+ {
597
+ "cell_type": "code",
598
+ "execution_count": null,
599
+ "id": "4a4575dc-a6b1-449b-bcb8-31b3eef348ba",
600
+ "metadata": {},
601
+ "outputs": [],
602
+ "source": [
603
+ "for thresh in range(rdm.shape[0]):\n",
604
+ " for img in range(rdm.shape[1]):\n",
605
+ " assert np.isclose(rdm[thresh, img, img], 1)"
606
+ ]
607
+ },
608
+ {
609
+ "cell_type": "code",
610
+ "execution_count": null,
611
+ "id": "ef19ef06-afc2-408d-b870-fe4df3473f90",
612
+ "metadata": {},
613
+ "outputs": [],
614
+ "source": [
615
+ "vox0.shape"
616
+ ]
617
+ },
618
+ {
619
+ "cell_type": "code",
620
+ "execution_count": null,
621
+ "id": "2fafa9cf-0638-4a4d-9743-70e15bbc8801",
622
+ "metadata": {},
623
+ "outputs": [],
624
+ "source": [
625
+ "r=0\n",
626
+ "for isamp, samp in enumerate(vox0):\n",
627
+ " while r==isamp:\n",
628
+ " r = np.random.randint(len(vox0))\n",
629
+ " if isamp==0:\n",
630
+ " same_corrs = np.array([np.corrcoef(samp[:,0], samp[:,1])[0,1]])\n",
631
+ " diff_corrs = np.array([np.corrcoef(samp[:,0], vox0[r][:,0])[0,1]])\n",
632
+ " else:\n",
633
+ " same_corrs = np.append(same_corrs, np.corrcoef(samp[:,0], samp[:,1])[0,1])\n",
634
+ " diff_corrs = np.append(diff_corrs, np.corrcoef(samp[:,0], vox0[r][:,0])[0,1])\n",
635
+ "\n",
636
+ "plt.figure(figsize=(5,4))\n",
637
+ "plt.title(f\"{sub}_{session} same/diff Pearson corr.\")\n",
638
+ "plt.plot(np.sort(same_corrs),c='blue',label='same')\n",
639
+ "plt.plot(np.sort(diff_corrs),c='cyan',label='diff')\n",
640
+ "plt.axhline(0,c='k',ls='--')\n",
641
+ "plt.legend()\n",
642
+ "plt.xlabel(\"sample\")\n",
643
+ "plt.ylabel(\"Pearson R\")\n",
644
+ "plt.show()"
645
+ ]
646
+ },
647
+ {
648
+ "cell_type": "code",
649
+ "execution_count": null,
650
+ "id": "31646431-10ac-4820-ba5d-c35f1e104557",
651
+ "metadata": {},
652
+ "outputs": [],
653
+ "source": [
654
+ "vox_pairs = utils.zscore(vox[pairs])\n",
655
+ "plt.figure(figsize=(5,4))\n",
656
+ "plt.title(f\"{sub}_{session} same minus diff difference Pearson corr.\")\n",
657
+ "plt.plot(np.sort(same_corrs - diff_corrs),c='cyan',label='difference')\n",
658
+ "plt.axhline(0,c='k',ls='--')\n",
659
+ "plt.legend()\n",
660
+ "plt.xlabel(\"sample\")\n",
661
+ "plt.ylabel(\"Pearson R\")\n",
662
+ "plt.show()"
663
+ ]
664
+ },
665
+ {
666
+ "cell_type": "markdown",
667
+ "id": "a8866ce2-1cf2-459e-aa81-dec26a3dcd33",
668
+ "metadata": {},
669
+ "source": [
670
+ "# Training MindEye"
671
+ ]
672
+ },
673
+ {
674
+ "cell_type": "code",
675
+ "execution_count": null,
676
+ "id": "248ce3a0-f03b-4c97-aee1-920432664ae1",
677
+ "metadata": {},
678
+ "outputs": [],
679
+ "source": [
680
+ "# Reliability thresholding?\n",
681
+ "print(f\"\\nvox before reliability thresholding: {vox.shape}\")\n",
682
+ "vox = vox[:,rels>.2]\n",
683
+ "print(f\"\\nvox after reliability thresholding: {vox.shape}\")"
684
+ ]
685
+ },
686
+ {
687
+ "cell_type": "code",
688
+ "execution_count": null,
689
+ "id": "b80aeb2d-6d53-431c-90ed-658dca7ecebd",
690
+ "metadata": {},
691
+ "outputs": [],
692
+ "source": [
693
+ "print(images.shape)\n",
694
+ "print(vox.shape)\n",
695
+ "assert len(images) == len(vox)"
696
+ ]
697
+ },
698
+ {
699
+ "cell_type": "code",
700
+ "execution_count": null,
701
+ "id": "8f554db1-f7cd-40d2-ab62-5d1e282c2bc8",
702
+ "metadata": {},
703
+ "outputs": [],
704
+ "source": [
705
+ "utils.seed_everything(0)\n",
706
+ "\n",
707
+ "if train_test_split == 'orig':\n",
708
+ " # train = all images except images that were repeated\n",
709
+ " # test = average of the same-image presentations\n",
710
+ " imageTrain = np.arange(len(images))\n",
711
+ " train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])\n",
712
+ " test_image_indices = pairs\n",
713
+ " print(len(train_image_indices), len(test_image_indices))\n",
714
+ "elif train_test_split == 'MST':\n",
715
+ " # non-MST images are the train split\n",
716
+ " # MST images are the test split\n",
717
+ " train_image_indices = np.where(MST_images==False)[0]\n",
718
+ " test_image_indices = np.where(MST_images==True)[0]\n",
719
+ " print(len(train_image_indices), len(test_image_indices))\n",
720
+ "else:\n",
721
+ " raise Exception(\"invalid train_test_split\")\n",
722
+ " \n",
723
+ "for i in train_image_indices:\n",
724
+ " assert i not in test_image_indices"
725
+ ]
726
+ },
727
+ {
728
+ "cell_type": "code",
729
+ "execution_count": null,
730
+ "id": "590f2b4b-db7c-42a1-bfd0-cc578e6af988",
731
+ "metadata": {},
732
+ "outputs": [],
733
+ "source": [
734
+ "train_mean = np.mean(vox[train_image_indices],axis=0)\n",
735
+ "train_std = np.std(vox[train_image_indices],axis=0)\n",
736
+ "\n",
737
+ "vox = utils.zscore(vox,train_mean=train_mean,train_std=train_std)\n",
738
+ "print(\"voxels have been zscored\")\n",
739
+ "print(vox[:,0].mean(), vox[:,0].std())\n",
740
+ "print(\"vox\", vox.shape)\n",
741
+ "\n",
742
+ "images = torch.Tensor(images)\n",
743
+ "vox = torch.Tensor(vox)"
744
+ ]
745
+ },
746
+ {
747
+ "cell_type": "code",
748
+ "execution_count": null,
749
+ "id": "cc5d2e32-6027-4a19-bef4-5ca068db35bb",
750
+ "metadata": {},
751
+ "outputs": [],
752
+ "source": [
753
+ "### Multi-GPU config ###\n",
754
+ "from accelerate import Accelerator, DeepSpeedPlugin\n",
755
+ "\n",
756
+ "local_rank = os.getenv('RANK')\n",
757
+ "if local_rank is None: \n",
758
+ " local_rank = 0\n",
759
+ "else:\n",
760
+ " local_rank = int(local_rank)\n",
761
+ "print(\"LOCAL RANK \", local_rank) \n",
762
+ "\n",
763
+ "data_type = torch.float32 # change depending on your mixed_precision\n",
764
+ "\n",
765
+ "accelerator = Accelerator(split_batches=False)\n",
766
+ "batch_size = 8 "
767
+ ]
768
+ },
769
+ {
770
+ "cell_type": "code",
771
+ "execution_count": null,
772
+ "id": "b767ab6f-d4a9-47a5-b3bf-f56bf6760c0c",
773
+ "metadata": {},
774
+ "outputs": [],
775
+ "source": [
776
+ "print(\"PID of this process =\",os.getpid())\n",
777
+ "device = accelerator.device\n",
778
+ "print(\"device:\",device)\n",
779
+ "world_size = accelerator.state.num_processes\n",
780
+ "distributed = not accelerator.state.distributed_type == 'NO'\n",
781
+ "num_devices = torch.cuda.device_count()\n",
782
+ "global_batch_size = batch_size * num_devices\n",
783
+ "print(\"global_batch_size\", global_batch_size)\n",
784
+ "if num_devices==0 or not distributed: num_devices = 1\n",
785
+ "num_workers = num_devices\n",
786
+ "print(accelerator.state)\n",
787
+ "\n",
788
+ "# set data_type to match your mixed precision (automatically set based on deepspeed config)\n",
789
+ "if accelerator.mixed_precision == \"bf16\":\n",
790
+ " data_type = torch.bfloat16\n",
791
+ "elif accelerator.mixed_precision == \"fp16\":\n",
792
+ " data_type = torch.float16\n",
793
+ "else:\n",
794
+ " data_type = torch.float32\n",
795
+ "\n",
796
+ "print(\"distributed =\",distributed, \"num_devices =\", num_devices, \"local rank =\", local_rank, \"world size =\", world_size, \"data_type =\", data_type)\n",
797
+ "print = accelerator.print # only print if local_rank=0"
798
+ ]
799
+ },
800
+ {
801
+ "cell_type": "markdown",
802
+ "id": "9018b82b-c054-4463-9527-4b0c2a75bda6",
803
+ "metadata": {
804
+ "tags": []
805
+ },
806
+ "source": [
807
+ "## Configurations"
808
+ ]
809
+ },
810
+ {
811
+ "cell_type": "code",
812
+ "execution_count": 6,
813
+ "id": "2b61fec7-72a0-4b67-86da-1375f1d9fbd3",
814
+ "metadata": {},
815
+ "outputs": [
816
+ {
817
+ "name": "stdout",
818
+ "output_type": "stream",
819
+ "text": [
820
+ "model_name: sub-001_ses-02_bs24_MST_rishab_MSTsplit\n"
821
+ ]
822
+ },
823
+ {
824
+ "ename": "NameError",
825
+ "evalue": "name 'batch_size' is not defined",
826
+ "output_type": "error",
827
+ "traceback": [
828
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
829
+ "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
830
+ "Cell \u001b[0;32mIn[6], line 10\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_name:\u001b[39m\u001b[38;5;124m\"\u001b[39m, model_name)\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# global_batch_size and batch_size should already be defined in the above cells\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m# other variables can be specified in the following string:\u001b[39;00m\n\u001b[1;32m 8\u001b[0m jupyter_args \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;124m --model_name=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[0;32m---> 10\u001b[0m \u001b[38;5;124m --no-multi_subject --subj=1 --batch_size=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[43mbatch_size\u001b[49m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124m --hidden_dim=1024 --clip_scale=1. \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124m --no-blurry_recon --blur_scale=.5 \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;124m --no-use_prior --prior_scale=30 \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 14\u001b[0m \u001b[38;5;124m --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=10 --no-use_image_aug \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;124m --ckpt_interval=999 --no-ckpt_saving --new_test \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 16\u001b[0m \u001b[38;5;124m --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;28mprint\u001b[39m(jupyter_args)\n\u001b[1;32m 18\u001b[0m jupyter_args \u001b[38;5;241m=\u001b[39m jupyter_args\u001b[38;5;241m.\u001b[39msplit()\n",
831
+ "\u001b[0;31mNameError\u001b[0m: name 'batch_size' is not defined"
832
+ ]
833
+ }
834
+ ],
835
+ "source": [
836
+ "# if running this interactively, can specify jupyter_args here for argparser to use\n",
837
+ "if utils.is_interactive():\n",
838
+ " model_name = f\"sub-001_{session}_bs24_MST_rishab_{train_test_split}split\"\n",
839
+ " print(\"model_name:\", model_name)\n",
840
+ " \n",
841
+ " # global_batch_size and batch_size should already be defined in the above cells\n",
842
+ " # other variables can be specified in the following string:\n",
843
+ " jupyter_args = f\"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \\\n",
844
+ " --model_name={model_name} \\\n",
845
+ " --no-multi_subject --subj=1 --batch_size={batch_size} \\\n",
846
+ " --hidden_dim=1024 --clip_scale=1. \\\n",
847
+ " --no-blurry_recon --blur_scale=.5 \\\n",
848
+ " --no-use_prior --prior_scale=30 \\\n",
849
+ " --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=10 --no-use_image_aug \\\n",
850
+ " --ckpt_interval=999 --no-ckpt_saving --new_test \\\n",
851
+ " --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep\"\n",
852
+ " print(jupyter_args)\n",
853
+ " jupyter_args = jupyter_args.split()"
854
+ ]
855
+ },
856
+ {
857
+ "cell_type": "code",
858
+ "execution_count": null,
859
+ "id": "2028bdf0-2f41-46d9-b6e7-86b870dbf16c",
860
+ "metadata": {},
861
+ "outputs": [],
862
+ "source": [
863
+ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n",
864
+ "parser.add_argument(\n",
865
+ " \"--model_name\", type=str, default=\"testing\",\n",
866
+ " help=\"name of model, used for ckpt saving and wandb logging (if enabled)\",\n",
867
+ ")\n",
868
+ "parser.add_argument(\n",
869
+ " \"--data_path\", type=str, default=\"/weka/proj-fmri/shared/natural-scenes-dataset\",\n",
870
+ " help=\"Path to where NSD data is stored / where to download it to\",\n",
871
+ ")\n",
872
+ "parser.add_argument(\n",
873
+ " \"--subj\",type=int, default=1, choices=[1,2,3,4,5,6,7,8],\n",
874
+ " help=\"Validate on which subject?\",\n",
875
+ ")\n",
876
+ "parser.add_argument(\n",
877
+ " \"--multisubject_ckpt\", type=str, default=None,\n",
878
+ " help=\"Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.\",\n",
879
+ ")\n",
880
+ "parser.add_argument(\n",
881
+ " \"--num_sessions\", type=int, default=0,\n",
882
+ " help=\"Number of training sessions to include (if multi_subject, this variable doesnt matter)\",\n",
883
+ ")\n",
884
+ "parser.add_argument(\n",
885
+ " \"--use_prior\",action=argparse.BooleanOptionalAction,default=False,\n",
886
+ " help=\"whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)\",\n",
887
+ ")\n",
888
+ "parser.add_argument(\n",
889
+ " \"--batch_size\", type=int, default=32,\n",
890
+ " help=\"Batch size can be increased by 10x if only training v2c and not diffusion diffuser\",\n",
891
+ ")\n",
892
+ "parser.add_argument(\n",
893
+ " \"--wandb_log\",action=argparse.BooleanOptionalAction,default=False,\n",
894
+ " help=\"whether to log to wandb\",\n",
895
+ ")\n",
896
+ "parser.add_argument(\n",
897
+ " \"--resume_from_ckpt\",action=argparse.BooleanOptionalAction,default=False,\n",
898
+ " help=\"if not using wandb and want to resume from a ckpt\",\n",
899
+ ")\n",
900
+ "parser.add_argument(\n",
901
+ " \"--wandb_project\",type=str,default=\"stability\",\n",
902
+ " help=\"wandb project name\",\n",
903
+ ")\n",
904
+ "parser.add_argument(\n",
905
+ " \"--mixup_pct\",type=float,default=.33,\n",
906
+ " help=\"proportion of way through training when to switch from BiMixCo to SoftCLIP\",\n",
907
+ ")\n",
908
+ "parser.add_argument(\n",
909
+ " \"--low_mem\",action=argparse.BooleanOptionalAction,default=False,\n",
910
+ " help=\"whether to preload images to cpu to speed things up but consume more memory\",\n",
911
+ ")\n",
912
+ "parser.add_argument(\n",
913
+ " \"--blurry_recon\",action=argparse.BooleanOptionalAction,default=True,\n",
914
+ " help=\"whether to output blurry reconstructions\",\n",
915
+ ")\n",
916
+ "parser.add_argument(\n",
917
+ " \"--blur_scale\",type=float,default=.5,\n",
918
+ " help=\"multiply loss from blurry recons by this number\",\n",
919
+ ")\n",
920
+ "parser.add_argument(\n",
921
+ " \"--clip_scale\",type=float,default=1.,\n",
922
+ " help=\"multiply contrastive loss by this number\",\n",
923
+ ")\n",
924
+ "parser.add_argument(\n",
925
+ " \"--prior_scale\",type=float,default=30,\n",
926
+ " help=\"multiply diffusion prior loss by this\",\n",
927
+ ")\n",
928
+ "parser.add_argument(\n",
929
+ " \"--use_image_aug\",action=argparse.BooleanOptionalAction,default=True,\n",
930
+ " help=\"whether to use image augmentation\",\n",
931
+ ")\n",
932
+ "parser.add_argument(\n",
933
+ " \"--num_epochs\",type=int,default=120,\n",
934
+ " help=\"number of epochs of training\",\n",
935
+ ")\n",
936
+ "parser.add_argument(\n",
937
+ " \"--multi_subject\",action=argparse.BooleanOptionalAction,default=False,\n",
938
+ ")\n",
939
+ "parser.add_argument(\n",
940
+ " \"--new_test\",action=argparse.BooleanOptionalAction,default=True,\n",
941
+ ")\n",
942
+ "parser.add_argument(\n",
943
+ " \"--n_blocks\",type=int,default=2,\n",
944
+ ")\n",
945
+ "parser.add_argument(\n",
946
+ " \"--hidden_dim\",type=int,default=1024,\n",
947
+ ")\n",
948
+ "parser.add_argument(\n",
949
+ " \"--seq_past\",type=int,default=0,\n",
950
+ ")\n",
951
+ "parser.add_argument(\n",
952
+ " \"--seq_future\",type=int,default=0,\n",
953
+ ")\n",
954
+ "parser.add_argument(\n",
955
+ " \"--lr_scheduler_type\",type=str,default='cycle',choices=['cycle','linear'],\n",
956
+ ")\n",
957
+ "parser.add_argument(\n",
958
+ " \"--ckpt_saving\",action=argparse.BooleanOptionalAction,default=True,\n",
959
+ ")\n",
960
+ "parser.add_argument(\n",
961
+ " \"--ckpt_interval\",type=int,default=5,\n",
962
+ " help=\"save backup ckpt and reconstruct every x epochs\",\n",
963
+ ")\n",
964
+ "parser.add_argument(\n",
965
+ " \"--seed\",type=int,default=42,\n",
966
+ ")\n",
967
+ "parser.add_argument(\n",
968
+ " \"--max_lr\",type=float,default=3e-4,\n",
969
+ ")\n",
970
+ "\n",
971
+ "if utils.is_interactive():\n",
972
+ " args = parser.parse_args(jupyter_args)\n",
973
+ "else:\n",
974
+ " args = parser.parse_args()\n",
975
+ "\n",
976
+ "# create global variables without the args prefix\n",
977
+ "for attribute_name in vars(args).keys():\n",
978
+ " globals()[attribute_name] = getattr(args, attribute_name)\n",
979
+ " \n",
980
+ "# seed all random functions\n",
981
+ "utils.seed_everything(seed)\n",
982
+ "\n",
983
+ "outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')\n",
984
+ "if not os.path.exists(outdir) and ckpt_saving:\n",
985
+ " os.makedirs(outdir,exist_ok=True)\n",
986
+ "\n",
987
+ "cache_dir = \"/scratch/gpfs/ri4541/MindEyeV2/src\"\n",
988
+ "\n",
989
+ " \n",
990
+ "if use_image_aug or blurry_recon:\n",
991
+ " import kornia\n",
992
+ " import kornia.augmentation as K\n",
993
+ " from kornia.augmentation.container import AugmentationSequential\n",
994
+ "if use_image_aug:\n",
995
+ " img_augment = AugmentationSequential(\n",
996
+ " kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),\n",
997
+ " same_on_batch=False,\n",
998
+ " data_keys=[\"input\"],\n",
999
+ " )\n",
1000
+ " # Define the blurring augmentations\n",
1001
+ " blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)\n",
1002
+ " \n",
1003
+ "if multi_subject:\n",
1004
+ " subj_list = np.arange(1,9)\n",
1005
+ " subj_list = subj_list[subj_list != subj]\n",
1006
+ "else:\n",
1007
+ " subj_list = [subj]\n",
1008
+ "\n",
1009
+ "print(\"subj_list\", subj_list, \"num_sessions\", num_sessions)"
1010
+ ]
1011
+ },
1012
+ {
1013
+ "cell_type": "markdown",
1014
+ "id": "42d13c25-1369-4c49-81d4-83d713586096",
1015
+ "metadata": {
1016
+ "tags": []
1017
+ },
1018
+ "source": [
1019
+ "## Prep data, models, and dataloaders"
1020
+ ]
1021
+ },
1022
+ {
1023
+ "cell_type": "markdown",
1024
+ "id": "1c023f24-5233-4a15-a2f5-78487b3a8546",
1025
+ "metadata": {},
1026
+ "source": [
1027
+ "### Creating wds dataloader, preload betas and all 73k possible images"
1028
+ ]
1029
+ },
1030
+ {
1031
+ "cell_type": "code",
1032
+ "execution_count": 7,
1033
+ "id": "78dc192e-40dd-4d84-96c8-1c6b78fcb5bb",
1034
+ "metadata": {},
1035
+ "outputs": [
1036
+ {
1037
+ "name": "stdout",
1038
+ "output_type": "stream",
1039
+ "text": [
1040
+ "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_ses-02_bs24_MST_rishab_MSTsplit\n"
1041
+ ]
1042
+ }
1043
+ ],
1044
+ "source": [
1045
+ "# save MST_ID for 2-alternative forced-choice retrieval evaluation \n",
1046
+ "if 'MST' in model_name:\n",
1047
+ " eval_dir = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\"\n",
1048
+ " print(eval_dir)\n",
1049
+ " # Saving ##\n",
1050
+ " if not os.path.exists(eval_dir):\n",
1051
+ " os.mkdir(eval_dir)\n",
1052
+ "\n",
1053
+ " np.save(f\"{eval_dir}/{model_name}_MST_ID.npy\", MST_ID)\n",
1054
+ " np.save(f\"{eval_dir}/{model_name}_MST_pairmate_indices.npy\", MST_pairmate_indices)"
1055
+ ]
1056
+ },
1057
+ {
1058
+ "cell_type": "code",
1059
+ "execution_count": null,
1060
+ "id": "aefe7c27-ab39-4b2c-90f4-480f4087b7ab",
1061
+ "metadata": {},
1062
+ "outputs": [],
1063
+ "source": [
1064
+ "def my_split_by_node(urls): return urls\n",
1065
+ "num_voxels_list = []\n",
1066
+ "\n",
1067
+ "if multi_subject:\n",
1068
+ " nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])\n",
1069
+ " num_samples_per_epoch = (750*40) // num_devices \n",
1070
+ "else:\n",
1071
+ " # num_samples_per_epoch = (750*num_sessions) // num_devices \n",
1072
+ " num_samples_per_epoch = len(train_image_indices)\n",
1073
+ "\n",
1074
+ "print(\"dividing batch size by subj_list, which will then be concatenated across subj during training...\") \n",
1075
+ "batch_size = batch_size // len(subj_list)\n",
1076
+ "\n",
1077
+ "num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))\n",
1078
+ "\n",
1079
+ "print(\"batch_size =\", batch_size, \"num_iterations_per_epoch =\",num_iterations_per_epoch, \"num_samples_per_epoch =\",num_samples_per_epoch)"
1080
+ ]
1081
+ },
1082
+ {
1083
+ "cell_type": "code",
1084
+ "execution_count": null,
1085
+ "id": "e1942b0e-1223-40e6-b543-2f7ff2e8ebcd",
1086
+ "metadata": {
1087
+ "tags": []
1088
+ },
1089
+ "outputs": [],
1090
+ "source": [
1091
+ "train_data = {}\n",
1092
+ "train_dl = {}\n",
1093
+ "\n",
1094
+ "train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))\n",
1095
+ "\n",
1096
+ "test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))"
1097
+ ]
1098
+ },
1099
+ {
1100
+ "cell_type": "code",
1101
+ "execution_count": null,
1102
+ "id": "81084834-035f-4465-ad59-59e6b806a2f5",
1103
+ "metadata": {},
1104
+ "outputs": [],
1105
+ "source": [
1106
+ "num_voxels = {}\n",
1107
+ "voxels = {}\n",
1108
+ "for s in subj_list:\n",
1109
+ " print(f\"Training with {num_sessions} sessions\")\n",
1110
+ " train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)\n",
1111
+ "\n",
1112
+ " num_voxels_list.append(vox[0].shape[-1])\n",
1113
+ " num_voxels[f'subj0{s}'] = vox[0].shape[-1]\n",
1114
+ " voxels[f'subj0{s}'] = vox\n",
1115
+ " print(f\"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}\")\n",
1116
+ "\n",
1117
+ "print(\"Loaded all subj train dls and vox!\\n\")\n",
1118
+ "\n",
1119
+ "# Validate only on one subject\n",
1120
+ "if multi_subject: \n",
1121
+ " subj = subj_list[0] # cant validate on the actual held out person so picking first in subj_list\n",
1122
+ "test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True)\n",
1123
+ "\n",
1124
+ "print(f\"Loaded test dl for subj{subj}!\\n\")"
1125
+ ]
1126
+ },
1127
+ {
1128
+ "cell_type": "markdown",
1129
+ "id": "10ec4517-dbdf-4ece-98f6-4714d5de4e15",
1130
+ "metadata": {},
1131
+ "source": [
1132
+ "## Load models"
1133
+ ]
1134
+ },
1135
+ {
1136
+ "cell_type": "markdown",
1137
+ "id": "48d6160e-1ee8-4da7-a755-9dbb452a6fa5",
1138
+ "metadata": {},
1139
+ "source": [
1140
+ "### CLIP image embeddings model"
1141
+ ]
1142
+ },
1143
+ {
1144
+ "cell_type": "code",
1145
+ "execution_count": null,
1146
+ "id": "b0420dc0-199e-4c1a-857d-b1747058b467",
1147
+ "metadata": {},
1148
+ "outputs": [],
1149
+ "source": [
1150
+ "## USING OpenCLIP ViT-bigG ###\n",
1151
+ "sys.path.append('generative_models/')\n",
1152
+ "import sgm\n",
1153
+ "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder\n",
1154
+ "# from generative_models.sgm.models.diffusion import DiffusionEngine\n",
1155
+ "# from omegaconf import OmegaConf\n",
1156
+ "\n",
1157
+ "try:\n",
1158
+ " print(clip_img_embedder)\n",
1159
+ "except:\n",
1160
+ " clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n",
1161
+ " arch=\"ViT-bigG-14\",\n",
1162
+ " version=\"laion2b_s39b_b160k\",\n",
1163
+ " output_tokens=True,\n",
1164
+ " only_tokens=True,\n",
1165
+ " cache_dir=cache_dir\n",
1166
+ " )\n",
1167
+ " clip_img_embedder.to(device)\n",
1168
+ "clip_seq_dim = 256\n",
1169
+ "clip_emb_dim = 1664\n",
1170
+ "\n",
1171
+ "# ## USING OPEN AI CLIP ViT-L ###\n",
1172
+ "# import clip\n",
1173
+ "# try:\n",
1174
+ "# print(clip_model)\n",
1175
+ "# except:\n",
1176
+ "# clip_model, preprocess = clip.load(\"ViT-L/14\", device=device)\n",
1177
+ "# preprocess = transforms.Compose([\n",
1178
+ "# transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),\n",
1179
+ "# transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n",
1180
+ "# std=[0.26862954, 0.26130258, 0.27577711]),\n",
1181
+ "# ])\n",
1182
+ "# def clip_img_embedder(image):\n",
1183
+ "# preproc_img = preprocess(image)\n",
1184
+ "# return clip_model.encode_image(preproc_img)\n",
1185
+ "# clip_seq_dim = 1\n",
1186
+ "# clip_emb_dim = 768"
1187
+ ]
1188
+ },
1189
+ {
1190
+ "cell_type": "markdown",
1191
+ "id": "260e5e4a-f697-4b2c-88fc-01f6a54886c0",
1192
+ "metadata": {},
1193
+ "source": [
1194
+ "### MindEye modules"
1195
+ ]
1196
+ },
1197
+ {
1198
+ "cell_type": "code",
1199
+ "execution_count": null,
1200
+ "id": "c44c271b-173f-472e-b059-a2eda0f4c4c5",
1201
+ "metadata": {},
1202
+ "outputs": [],
1203
+ "source": [
1204
+ "class MindEyeModule(nn.Module):\n",
1205
+ " def __init__(self):\n",
1206
+ " super(MindEyeModule, self).__init__()\n",
1207
+ " def forward(self, x):\n",
1208
+ " return x\n",
1209
+ " \n",
1210
+ "model = MindEyeModule()\n",
1211
+ "model"
1212
+ ]
1213
+ },
1214
+ {
1215
+ "cell_type": "code",
1216
+ "execution_count": null,
1217
+ "id": "038a5d61-4769-40b9-a004-f4e7b5b38bb0",
1218
+ "metadata": {},
1219
+ "outputs": [],
1220
+ "source": [
1221
+ "class RidgeRegression(torch.nn.Module):\n",
1222
+ " # make sure to add weight_decay when initializing optimizer\n",
1223
+ " def __init__(self, input_sizes, out_features, seq_len=1): \n",
1224
+ " super(RidgeRegression, self).__init__()\n",
1225
+ " self.seq_len = seq_len\n",
1226
+ " self.out_features = out_features\n",
1227
+ " self.linears = torch.nn.ModuleList([\n",
1228
+ " torch.nn.Linear(input_size, out_features) for input_size in input_sizes\n",
1229
+ " ])\n",
1230
+ " def forward(self, x, subj_idx=0):\n",
1231
+ " out = torch.cat([self.linears[subj_idx](x[:,seq]).unsqueeze(1) for seq in range(self.seq_len)], dim=1)\n",
1232
+ " return out\n",
1233
+ " \n",
1234
+ "model.ridge = RidgeRegression(num_voxels_list, out_features=hidden_dim)\n",
1235
+ "utils.count_params(model.ridge)\n",
1236
+ "utils.count_params(model)\n",
1237
+ "\n",
1238
+ "# test on subject 1 with fake data\n",
1239
+ "b = torch.randn((2,1,num_voxels_list[0]))\n",
1240
+ "print(b.shape, model.ridge(b,0).shape)"
1241
+ ]
1242
+ },
1243
+ {
1244
+ "cell_type": "code",
1245
+ "execution_count": null,
1246
+ "id": "7b8de65a-6d3b-4248-bea9-9b6f4d562321",
1247
+ "metadata": {},
1248
+ "outputs": [],
1249
+ "source": [
1250
+ "from functools import partial\n",
1251
+ "from diffusers.models.vae import Decoder\n",
1252
+ "class BrainNetwork(nn.Module):\n",
1253
+ " def __init__(self, h=4096, in_dim=15724, out_dim=768, seq_len=1, n_blocks=n_blocks, drop=.15, \n",
1254
+ " clip_size=768):\n",
1255
+ " super().__init__()\n",
1256
+ " self.seq_len = seq_len\n",
1257
+ " self.h = h\n",
1258
+ " self.clip_size = clip_size\n",
1259
+ " \n",
1260
+ " self.mixer_blocks1 = nn.ModuleList([\n",
1261
+ " self.mixer_block1(h, drop) for _ in range(n_blocks)\n",
1262
+ " ])\n",
1263
+ " self.mixer_blocks2 = nn.ModuleList([\n",
1264
+ " self.mixer_block2(seq_len, drop) for _ in range(n_blocks)\n",
1265
+ " ])\n",
1266
+ " \n",
1267
+ " # Output linear layer\n",
1268
+ " self.backbone_linear = nn.Linear(h * seq_len, out_dim, bias=True) \n",
1269
+ " if clip_scale>0:\n",
1270
+ " self.clip_proj = self.projector(clip_size, clip_size, h=clip_size)\n",
1271
+ " \n",
1272
+ " def projector(self, in_dim, out_dim, h=2048):\n",
1273
+ " return nn.Sequential(\n",
1274
+ " nn.LayerNorm(in_dim),\n",
1275
+ " nn.GELU(),\n",
1276
+ " nn.Linear(in_dim, h),\n",
1277
+ " nn.LayerNorm(h),\n",
1278
+ " nn.GELU(),\n",
1279
+ " nn.Linear(h, h),\n",
1280
+ " nn.LayerNorm(h),\n",
1281
+ " nn.GELU(),\n",
1282
+ " nn.Linear(h, out_dim)\n",
1283
+ " )\n",
1284
+ " \n",
1285
+ " def mlp(self, in_dim, out_dim, drop):\n",
1286
+ " return nn.Sequential(\n",
1287
+ " nn.Linear(in_dim, out_dim),\n",
1288
+ " nn.GELU(),\n",
1289
+ " nn.Dropout(drop),\n",
1290
+ " nn.Linear(out_dim, out_dim),\n",
1291
+ " )\n",
1292
+ " \n",
1293
+ " def mixer_block1(self, h, drop):\n",
1294
+ " return nn.Sequential(\n",
1295
+ " nn.LayerNorm(h),\n",
1296
+ " self.mlp(h, h, drop), # Token mixing\n",
1297
+ " )\n",
1298
+ "\n",
1299
+ " def mixer_block2(self, seq_len, drop):\n",
1300
+ " return nn.Sequential(\n",
1301
+ " nn.LayerNorm(seq_len),\n",
1302
+ " self.mlp(seq_len, seq_len, drop) # Channel mixing\n",
1303
+ " )\n",
1304
+ " \n",
1305
+ " def forward(self, x):\n",
1306
+ " # make empty tensors\n",
1307
+ " c,b = torch.Tensor([0.]), torch.Tensor([[0.],[0.]])\n",
1308
+ " \n",
1309
+ " # Mixer blocks\n",
1310
+ " residual1 = x\n",
1311
+ " residual2 = x.permute(0,2,1)\n",
1312
+ " for block1, block2 in zip(self.mixer_blocks1,self.mixer_blocks2):\n",
1313
+ " x = block1(x) + residual1\n",
1314
+ " residual1 = x\n",
1315
+ " x = x.permute(0,2,1)\n",
1316
+ " \n",
1317
+ " x = block2(x) + residual2\n",
1318
+ " residual2 = x\n",
1319
+ " x = x.permute(0,2,1)\n",
1320
+ " \n",
1321
+ " x = x.reshape(x.size(0), -1)\n",
1322
+ " backbone = self.backbone_linear(x).reshape(len(x), -1, self.clip_size)\n",
1323
+ " if clip_scale>0:\n",
1324
+ " c = self.clip_proj(backbone)\n",
1325
+ " \n",
1326
+ " return backbone, c, b\n",
1327
+ "\n",
1328
+ "model.backbone = BrainNetwork(h=hidden_dim, in_dim=hidden_dim, seq_len=1, \n",
1329
+ " clip_size=clip_emb_dim, out_dim=clip_emb_dim*clip_seq_dim)\n",
1330
+ "utils.count_params(model.backbone)\n",
1331
+ "utils.count_params(model)\n",
1332
+ "\n",
1333
+ "# test that the model works on some fake data\n",
1334
+ "b = torch.randn((2,1,hidden_dim))\n",
1335
+ "print(\"b.shape\",b.shape)\n",
1336
+ "\n",
1337
+ "backbone_, clip_, blur_ = model.backbone(b)\n",
1338
+ "print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)"
1339
+ ]
1340
+ },
1341
+ {
1342
+ "cell_type": "markdown",
1343
+ "id": "b397c0d7-52a3-4153-823b-c27d2eb3eeba",
1344
+ "metadata": {},
1345
+ "source": [
1346
+ "### Adding diffusion prior + unCLIP if use_prior=True"
1347
+ ]
1348
+ },
1349
+ {
1350
+ "cell_type": "code",
1351
+ "execution_count": null,
1352
+ "id": "69965344-9346-4592-9cc5-e537e31d5fce",
1353
+ "metadata": {
1354
+ "tags": []
1355
+ },
1356
+ "outputs": [],
1357
+ "source": [
1358
+ "if use_prior:\n",
1359
+ " from models import *\n",
1360
+ "\n",
1361
+ " # setup diffusion prior network\n",
1362
+ " out_dim = clip_emb_dim\n",
1363
+ " depth = 6\n",
1364
+ " dim_head = 52\n",
1365
+ " heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim\n",
1366
+ " timesteps = 100\n",
1367
+ "\n",
1368
+ " prior_network = VersatileDiffusionPriorNetwork(\n",
1369
+ " dim=out_dim,\n",
1370
+ " depth=depth,\n",
1371
+ " dim_head=dim_head,\n",
1372
+ " heads=heads,\n",
1373
+ " causal=False,\n",
1374
+ " num_tokens = clip_seq_dim,\n",
1375
+ " learned_query_mode=\"pos_emb\"\n",
1376
+ " )\n",
1377
+ "\n",
1378
+ " model.diffusion_prior = BrainDiffusionPrior(\n",
1379
+ " net=prior_network,\n",
1380
+ " image_embed_dim=out_dim,\n",
1381
+ " condition_on_text_encodings=False,\n",
1382
+ " timesteps=timesteps,\n",
1383
+ " cond_drop_prob=0.2,\n",
1384
+ " image_embed_scale=None,\n",
1385
+ " )\n",
1386
+ " \n",
1387
+ " utils.count_params(model.diffusion_prior)\n",
1388
+ " utils.count_params(model)"
1389
+ ]
1390
+ },
1391
+ {
1392
+ "cell_type": "markdown",
1393
+ "id": "ec25271a-2209-400c-8026-df3b8ddc1eef",
1394
+ "metadata": {},
1395
+ "source": [
1396
+ "### Setup optimizer / lr / ckpt saving"
1397
+ ]
1398
+ },
1399
+ {
1400
+ "cell_type": "code",
1401
+ "execution_count": null,
1402
+ "id": "e14d0482-dc42-43b9-9ce1-953c32f2c9c1",
1403
+ "metadata": {},
1404
+ "outputs": [],
1405
+ "source": [
1406
+ "no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n",
1407
+ "\n",
1408
+ "opt_grouped_parameters = [\n",
1409
+ " {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},\n",
1410
+ " {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},\n",
1411
+ " {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},\n",
1412
+ "]\n",
1413
+ "# model.backbone.requires_grad_(False)\n",
1414
+ "\n",
1415
+ "if use_prior:\n",
1416
+ " opt_grouped_parameters.extend([\n",
1417
+ " {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},\n",
1418
+ " {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n",
1419
+ " ])\n",
1420
+ "\n",
1421
+ "optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)\n",
1422
+ "\n",
1423
+ "if lr_scheduler_type == 'linear':\n",
1424
+ " lr_scheduler = torch.optim.lr_scheduler.LinearLR(\n",
1425
+ " optimizer,\n",
1426
+ " total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),\n",
1427
+ " last_epoch=-1\n",
1428
+ " )\n",
1429
+ "elif lr_scheduler_type == 'cycle':\n",
1430
+ " if num_iterations_per_epoch==0:\n",
1431
+ " num_iterations_per_epoch=1\n",
1432
+ " total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))\n",
1433
+ " print(\"total_steps\", total_steps)\n",
1434
+ " lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(\n",
1435
+ " optimizer, \n",
1436
+ " max_lr=max_lr,\n",
1437
+ " total_steps=total_steps,\n",
1438
+ " final_div_factor=1000,\n",
1439
+ " last_epoch=-1, pct_start=2/num_epochs\n",
1440
+ " )\n",
1441
+ " \n",
1442
+ "def save_ckpt(tag):\n",
1443
+ " ckpt_path = outdir+f'/{tag}.pth'\n",
1444
+ " if accelerator.is_main_process:\n",
1445
+ " unwrapped_model = accelerator.unwrap_model(model)\n",
1446
+ " torch.save({\n",
1447
+ " 'epoch': epoch,\n",
1448
+ " 'model_state_dict': unwrapped_model.state_dict(),\n",
1449
+ " 'optimizer_state_dict': optimizer.state_dict(),\n",
1450
+ " 'lr_scheduler': lr_scheduler.state_dict(),\n",
1451
+ " 'train_losses': losses,\n",
1452
+ " 'test_losses': test_losses,\n",
1453
+ " 'lrs': lrs,\n",
1454
+ " }, ckpt_path)\n",
1455
+ " print(f\"\\n---saved {outdir}/{tag} ckpt!---\\n\")\n",
1456
+ "\n",
1457
+ "def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False): \n",
1458
+ " print(f\"\\n---loading {outdir}/{tag}.pth ckpt---\\n\")\n",
1459
+ " checkpoint = torch.load(outdir+'/last.pth', map_location='cpu')\n",
1460
+ " state_dict = checkpoint['model_state_dict']\n",
1461
+ " if multisubj_loading: # remove incompatible ridge layer that will otherwise error\n",
1462
+ " state_dict.pop('ridge.linears.0.weight',None)\n",
1463
+ " model.load_state_dict(state_dict, strict=strict)\n",
1464
+ " if load_epoch:\n",
1465
+ " globals()[\"epoch\"] = checkpoint['epoch']\n",
1466
+ " print(\"Epoch\",epoch)\n",
1467
+ " if load_optimizer:\n",
1468
+ " optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n",
1469
+ " if load_lr:\n",
1470
+ " lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n",
1471
+ " del checkpoint\n",
1472
+ "\n",
1473
+ "print(\"\\nDone with model preparations!\")\n",
1474
+ "num_params = utils.count_params(model)"
1475
+ ]
1476
+ },
1477
+ {
1478
+ "cell_type": "markdown",
1479
+ "id": "b1e8dcc4-5ce2-4206-88dc-a68d1dd701cd",
1480
+ "metadata": {},
1481
+ "source": [
1482
+ "# Wandb"
1483
+ ]
1484
+ },
1485
+ {
1486
+ "cell_type": "code",
1487
+ "execution_count": null,
1488
+ "id": "990cce8c-df83-473a-93c8-c47ba355eccd",
1489
+ "metadata": {},
1490
+ "outputs": [],
1491
+ "source": [
1492
+ "if local_rank==0 and wandb_log: # only use main process for wandb logging\n",
1493
+ " import wandb\n",
1494
+ " wandb_project = 'rtmindeye'\n",
1495
+ " print(f\"wandb {wandb_project} run {model_name}\")\n",
1496
+ " # need to configure wandb beforehand in terminal with \"wandb init\"!\n",
1497
+ " wandb_config = {\n",
1498
+ " \"model_name\": model_name,\n",
1499
+ " \"global_batch_size\": global_batch_size,\n",
1500
+ " \"batch_size\": batch_size,\n",
1501
+ " \"num_epochs\": num_epochs,\n",
1502
+ " \"num_sessions\": num_sessions,\n",
1503
+ " \"num_params\": num_params,\n",
1504
+ " \"clip_scale\": clip_scale,\n",
1505
+ " \"prior_scale\": prior_scale,\n",
1506
+ " \"blur_scale\": blur_scale,\n",
1507
+ " \"use_image_aug\": use_image_aug,\n",
1508
+ " \"max_lr\": max_lr,\n",
1509
+ " \"mixup_pct\": mixup_pct,\n",
1510
+ " \"num_samples_per_epoch\": num_samples_per_epoch,\n",
1511
+ " \"ckpt_interval\": ckpt_interval,\n",
1512
+ " \"ckpt_saving\": ckpt_saving,\n",
1513
+ " \"seed\": seed,\n",
1514
+ " \"distributed\": distributed,\n",
1515
+ " \"num_devices\": num_devices,\n",
1516
+ " \"world_size\": world_size,\n",
1517
+ " }\n",
1518
+ " print(\"wandb_config:\\n\",wandb_config)\n",
1519
+ " print(\"wandb_id:\",model_name)\n",
1520
+ " wandb.init(\n",
1521
+ " id=model_name,\n",
1522
+ " project=wandb_project,\n",
1523
+ " name=model_name,\n",
1524
+ " config=wandb_config,\n",
1525
+ " resume=\"allow\",\n",
1526
+ " )\n",
1527
+ "else:\n",
1528
+ " wandb_log = False"
1529
+ ]
1530
+ },
1531
+ {
1532
+ "cell_type": "markdown",
1533
+ "id": "d5690151-2131-4918-b750-e869cbd1a8a8",
1534
+ "metadata": {},
1535
+ "source": [
1536
+ "# Train the model"
1537
+ ]
1538
+ },
1539
+ {
1540
+ "cell_type": "code",
1541
+ "execution_count": null,
1542
+ "id": "12de6387-6e18-4e4b-b5ce-a847d625330a",
1543
+ "metadata": {},
1544
+ "outputs": [],
1545
+ "source": [
1546
+ "epoch = 0\n",
1547
+ "losses, test_losses, lrs = [], [], []\n",
1548
+ "best_test_loss = 1e9\n",
1549
+ "torch.cuda.empty_cache()"
1550
+ ]
1551
+ },
1552
+ {
1553
+ "cell_type": "code",
1554
+ "execution_count": null,
1555
+ "id": "607a7c7b-fe5e-41a4-80bf-d2814b3a57cc",
1556
+ "metadata": {
1557
+ "tags": []
1558
+ },
1559
+ "outputs": [],
1560
+ "source": [
1561
+ "# load multisubject stage1 ckpt if set\n",
1562
+ "if multisubject_ckpt is not None and not resume_from_ckpt:\n",
1563
+ " load_ckpt(\"last\",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)"
1564
+ ]
1565
+ },
1566
+ {
1567
+ "cell_type": "code",
1568
+ "execution_count": null,
1569
+ "id": "927350ea-b234-48e6-ae7b-2eee41ec0358",
1570
+ "metadata": {},
1571
+ "outputs": [],
1572
+ "source": []
1573
+ },
1574
+ {
1575
+ "cell_type": "code",
1576
+ "execution_count": null,
1577
+ "id": "00ea5ae0-5c92-4276-af5b-25a17ba4dc17",
1578
+ "metadata": {},
1579
+ "outputs": [],
1580
+ "source": [
1581
+ "# checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')\n",
1582
+ "# state_dict = checkpoint['model_state_dict']\n",
1583
+ "# model.load_state_dict(state_dict, strict=False)"
1584
+ ]
1585
+ },
1586
+ {
1587
+ "cell_type": "code",
1588
+ "execution_count": null,
1589
+ "id": "99f09f76-4481-4133-b09a-a22b10dbc0c4",
1590
+ "metadata": {},
1591
+ "outputs": [],
1592
+ "source": [
1593
+ "# train_dls = [train_dl[f'subj0{s}'] for s in subj_list]\n",
1594
+ "\n",
1595
+ "model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)\n",
1596
+ "# leaving out test_dl since we will only have local_rank 0 device do evals"
1597
+ ]
1598
+ },
1599
+ {
1600
+ "cell_type": "code",
1601
+ "execution_count": null,
1602
+ "id": "60be0d5f-3e94-4612-9373-61b53d836393",
1603
+ "metadata": {
1604
+ "scrolled": true
1605
+ },
1606
+ "outputs": [],
1607
+ "source": [
1608
+ "print(f\"{model_name} starting with epoch {epoch} / {num_epochs}\")\n",
1609
+ "progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0))\n",
1610
+ "test_image, test_voxel = None, None\n",
1611
+ "mse = nn.MSELoss()\n",
1612
+ "l1 = nn.L1Loss()\n",
1613
+ "soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs))\n",
1614
+ "skip_train = True if epoch>=(num_epochs-1) else False # skip training if you are resuming from a fully trained model\n",
1615
+ "\n",
1616
+ "for epoch in progress_bar:\n",
1617
+ " model.train()\n",
1618
+ "\n",
1619
+ " fwd_percent_correct = 0.\n",
1620
+ " bwd_percent_correct = 0.\n",
1621
+ " test_fwd_percent_correct = 0.\n",
1622
+ " test_bwd_percent_correct = 0.\n",
1623
+ " \n",
1624
+ " recon_cossim = 0.\n",
1625
+ " test_recon_cossim = 0.\n",
1626
+ " recon_mse = 0.\n",
1627
+ " test_recon_mse = 0.\n",
1628
+ "\n",
1629
+ " loss_clip_total = 0.\n",
1630
+ " loss_blurry_total = 0.\n",
1631
+ " loss_blurry_cont_total = 0.\n",
1632
+ " test_loss_clip_total = 0.\n",
1633
+ " \n",
1634
+ " loss_prior_total = 0.\n",
1635
+ " test_loss_prior_total = 0.\n",
1636
+ "\n",
1637
+ " blurry_pixcorr = 0.\n",
1638
+ " test_blurry_pixcorr = 0. \n",
1639
+ "\n",
1640
+ " # you now have voxel_iters and image_iters with num_iterations_per_epoch batches each\n",
1641
+ " for train_i, behav in enumerate(train_dl): \n",
1642
+ " with torch.cuda.amp.autocast(dtype=data_type):\n",
1643
+ " optimizer.zero_grad()\n",
1644
+ " loss = 0.\n",
1645
+ " \n",
1646
+ " behav = behav[0]\n",
1647
+ "\n",
1648
+ " image = images[behav.long().cpu()].to(device)\n",
1649
+ " voxel = vox[behav.long().cpu()]\n",
1650
+ " # voxel = (voxel - train_mean) / train_std\n",
1651
+ " voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n",
1652
+ "\n",
1653
+ " if use_image_aug: \n",
1654
+ " image = img_augment(image)\n",
1655
+ "\n",
1656
+ " clip_target = clip_img_embedder(image)\n",
1657
+ " assert not torch.any(torch.isnan(clip_target))\n",
1658
+ "\n",
1659
+ " if epoch < int(mixup_pct * num_epochs):\n",
1660
+ " voxel, perm, betas, select = utils.mixco(voxel)\n",
1661
+ "\n",
1662
+ " voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)]\n",
1663
+ " # voxel_ridge = torch.cat(voxel_ridge_list, dim=0)\n",
1664
+ "\n",
1665
+ " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n",
1666
+ "\n",
1667
+ " if clip_scale>0:\n",
1668
+ " clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n",
1669
+ " clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)\n",
1670
+ "\n",
1671
+ " if use_prior:\n",
1672
+ " loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target)\n",
1673
+ " loss_prior_total += loss_prior.item()\n",
1674
+ " loss_prior *= prior_scale\n",
1675
+ " loss += loss_prior\n",
1676
+ "\n",
1677
+ " recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item()\n",
1678
+ " recon_mse += mse(prior_out, clip_target).item()\n",
1679
+ "\n",
1680
+ " if clip_scale>0:\n",
1681
+ " if epoch < int(mixup_pct * num_epochs): \n",
1682
+ " loss_clip = utils.mixco_nce(\n",
1683
+ " clip_voxels_norm,\n",
1684
+ " clip_target_norm,\n",
1685
+ " temp=.006,\n",
1686
+ " perm=perm, betas=betas, select=select)\n",
1687
+ " else:\n",
1688
+ " epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)]\n",
1689
+ " loss_clip = utils.soft_clip_loss(\n",
1690
+ " clip_voxels_norm,\n",
1691
+ " clip_target_norm,\n",
1692
+ " temp=epoch_temp)\n",
1693
+ "\n",
1694
+ " loss_clip_total += loss_clip.item()\n",
1695
+ " loss_clip *= clip_scale\n",
1696
+ " loss += loss_clip\n",
1697
+ "\n",
1698
+ " if blurry_recon: \n",
1699
+ " image_enc_pred, transformer_feats = blurry_image_enc_\n",
1700
+ "\n",
1701
+ " image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215\n",
1702
+ " loss_blurry = l1(image_enc_pred, image_enc)\n",
1703
+ " loss_blurry_total += loss_blurry.item()\n",
1704
+ "\n",
1705
+ " if epoch < int(mixup_pct * num_epochs):\n",
1706
+ " image_enc_shuf = image_enc[perm]\n",
1707
+ " betas_shape = [-1] + [1]*(len(image_enc.shape)-1)\n",
1708
+ " image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \\\n",
1709
+ " image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)\n",
1710
+ "\n",
1711
+ " image_norm = (image - mean)/std\n",
1712
+ " image_aug = (blur_augs(image) - mean)/std\n",
1713
+ " _, cnx_embeds = cnx(image_norm)\n",
1714
+ " _, cnx_aug_embeds = cnx(image_aug)\n",
1715
+ "\n",
1716
+ " cont_loss = utils.soft_cont_loss(\n",
1717
+ " nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),\n",
1718
+ " nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),\n",
1719
+ " nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),\n",
1720
+ " temp=0.2)\n",
1721
+ " loss_blurry_cont_total += cont_loss.item()\n",
1722
+ "\n",
1723
+ " loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215\n",
1724
+ "\n",
1725
+ " if clip_scale>0:\n",
1726
+ " # forward and backward top 1 accuracy \n",
1727
+ " labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) \n",
1728
+ " fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()\n",
1729
+ " bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()\n",
1730
+ "\n",
1731
+ " if blurry_recon:\n",
1732
+ " with torch.no_grad():\n",
1733
+ " # only doing pixcorr eval on a subset of the samples per batch because its costly & slow to compute autoenc.decode()\n",
1734
+ " random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)\n",
1735
+ " blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1)\n",
1736
+ " pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)\n",
1737
+ " blurry_pixcorr += pixcorr.item()\n",
1738
+ " \n",
1739
+ " utils.check_loss(loss)\n",
1740
+ " accelerator.backward(loss)\n",
1741
+ " optimizer.step()\n",
1742
+ "\n",
1743
+ " losses.append(loss.item())\n",
1744
+ " lrs.append(optimizer.param_groups[0]['lr'])\n",
1745
+ "\n",
1746
+ " if lr_scheduler_type is not None:\n",
1747
+ " lr_scheduler.step()\n",
1748
+ " \n",
1749
+ " if train_i >= num_iterations_per_epoch-1:\n",
1750
+ " break\n",
1751
+ " \n",
1752
+ " model.eval()\n",
1753
+ " if local_rank==0:\n",
1754
+ " with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): \n",
1755
+ " for test_i, behav in enumerate(test_dl): \n",
1756
+ " behav = behav[0]\n",
1757
+ "\n",
1758
+ " loss=0.\n",
1759
+ "\n",
1760
+ " if behav.ndim>1:\n",
1761
+ " image = images[behav[:,0].long().cpu()].to(device)\n",
1762
+ " voxel = vox[behav.long().cpu()].mean(1)\n",
1763
+ " else:\n",
1764
+ " image = images[behav.long().cpu()].to(device)\n",
1765
+ " voxel = vox[behav.long().cpu()]\n",
1766
+ " \n",
1767
+ " voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n",
1768
+ "\n",
1769
+ " clip_img_embedder = clip_img_embedder.to(device)\n",
1770
+ " clip_target = clip_img_embedder(image.float())\n",
1771
+ " \n",
1772
+ " voxel_ridge = model.ridge(voxel,0)\n",
1773
+ "\n",
1774
+ " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n",
1775
+ "\n",
1776
+ " if clip_scale>0:\n",
1777
+ " clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n",
1778
+ " clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)\n",
1779
+ " \n",
1780
+ " # for some evals, only doing a subset of the samples per batch because of computational cost\n",
1781
+ " random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)\n",
1782
+ " \n",
1783
+ " if use_prior:\n",
1784
+ " loss_prior, contaminated_prior_out = model.diffusion_prior(text_embed=backbone[random_samps], image_embed=clip_target[random_samps])\n",
1785
+ " test_loss_prior_total += loss_prior.item()\n",
1786
+ " loss_prior *= prior_scale\n",
1787
+ " loss += loss_prior\n",
1788
+ " \n",
1789
+ " if clip_scale>0:\n",
1790
+ " loss_clip = utils.soft_clip_loss(\n",
1791
+ " clip_voxels_norm,\n",
1792
+ " clip_target_norm,\n",
1793
+ " temp=.006)\n",
1794
+ "\n",
1795
+ " test_loss_clip_total += loss_clip.item()\n",
1796
+ " loss_clip = loss_clip * clip_scale\n",
1797
+ " loss += loss_clip\n",
1798
+ "\n",
1799
+ " if blurry_recon:\n",
1800
+ " image_enc_pred, _ = blurry_image_enc_\n",
1801
+ " blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample / 2 + 0.5).clamp(0,1)\n",
1802
+ " pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)\n",
1803
+ " test_blurry_pixcorr += pixcorr.item()\n",
1804
+ "\n",
1805
+ " if clip_scale>0:\n",
1806
+ " # forward and backward top 1 accuracy \n",
1807
+ " labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) \n",
1808
+ " test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()\n",
1809
+ " test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()\n",
1810
+ " \n",
1811
+ " utils.check_loss(loss) \n",
1812
+ " test_losses.append(loss.item())\n",
1813
+ "\n",
1814
+ " # if utils.is_interactive(): clear_output(wait=True)\n",
1815
+ " if skip_train: break\n",
1816
+ " print(\"---\")\n",
1817
+ "\n",
1818
+ " # assert (test_i+1) == 1\n",
1819
+ " logs = {\"train/loss\": np.mean(losses[-(train_i+1):]),\n",
1820
+ " \"test/loss\": np.mean(test_losses[-(test_i+1):]),\n",
1821
+ " \"train/lr\": lrs[-1],\n",
1822
+ " \"train/num_steps\": len(losses),\n",
1823
+ " \"test/num_steps\": len(test_losses),\n",
1824
+ " \"train/fwd_pct_correct\": fwd_percent_correct / (train_i + 1),\n",
1825
+ " \"train/bwd_pct_correct\": bwd_percent_correct / (train_i + 1),\n",
1826
+ " \"test/test_fwd_pct_correct\": test_fwd_percent_correct / (test_i + 1),\n",
1827
+ " \"test/test_bwd_pct_correct\": test_bwd_percent_correct / (test_i + 1),\n",
1828
+ " \"train/loss_clip_total\": loss_clip_total / (train_i + 1),\n",
1829
+ " \"train/loss_blurry_total\": loss_blurry_total / (train_i + 1),\n",
1830
+ " \"train/loss_blurry_cont_total\": loss_blurry_cont_total / (train_i + 1),\n",
1831
+ " \"test/loss_clip_total\": test_loss_clip_total / (test_i + 1),\n",
1832
+ " \"train/blurry_pixcorr\": blurry_pixcorr / (train_i + 1),\n",
1833
+ " \"test/blurry_pixcorr\": test_blurry_pixcorr / (test_i + 1),\n",
1834
+ " \"train/recon_cossim\": recon_cossim / (train_i + 1),\n",
1835
+ " \"test/recon_cossim\": test_recon_cossim / (test_i + 1),\n",
1836
+ " \"train/recon_mse\": recon_mse / (train_i + 1),\n",
1837
+ " \"test/recon_mse\": test_recon_mse / (test_i + 1),\n",
1838
+ " \"train/loss_prior\": loss_prior_total / (train_i + 1),\n",
1839
+ " \"test/loss_prior\": test_loss_prior_total / (test_i + 1),\n",
1840
+ " }\n",
1841
+ "\n",
1842
+ " # if finished training, save jpg recons if they exist\n",
1843
+ " if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0):\n",
1844
+ " if blurry_recon: \n",
1845
+ " image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215\n",
1846
+ " # transform blurry recon latents to images and plot it\n",
1847
+ " fig, axes = plt.subplots(1, 8, figsize=(10, 4))\n",
1848
+ " jj=-1\n",
1849
+ " for j in [0,1,2,3]:\n",
1850
+ " jj+=1\n",
1851
+ " axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))\n",
1852
+ " axes[jj].axis('off')\n",
1853
+ " jj+=1\n",
1854
+ " axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))\n",
1855
+ " axes[jj].axis('off')\n",
1856
+ " plt.show()\n",
1857
+ "\n",
1858
+ " progress_bar.set_postfix(**logs)\n",
1859
+ "\n",
1860
+ " if wandb_log: wandb.log(logs)\n",
1861
+ " \n",
1862
+ " # Save model checkpoint and reconstruct\n",
1863
+ " if (ckpt_saving) and (epoch % ckpt_interval == 0):\n",
1864
+ " save_ckpt(f'last')\n",
1865
+ "\n",
1866
+ " # wait for other GPUs to catch up if needed\n",
1867
+ " accelerator.wait_for_everyone()\n",
1868
+ " torch.cuda.empty_cache()\n",
1869
+ "\n",
1870
+ "print(\"\\n===Finished!===\\n\")\n",
1871
+ "if ckpt_saving:\n",
1872
+ " save_ckpt(f'last')"
1873
+ ]
1874
+ },
1875
+ {
1876
+ "cell_type": "code",
1877
+ "execution_count": null,
1878
+ "id": "b0af03cb-58c3-4e3e-9e2b-a3485635864b",
1879
+ "metadata": {},
1880
+ "outputs": [],
1881
+ "source": [
1882
+ "blurry_recon"
1883
+ ]
1884
+ },
1885
+ {
1886
+ "cell_type": "code",
1887
+ "execution_count": null,
1888
+ "id": "5702acf6-45fe-44f5-8842-c0e2d4d8e8ce",
1889
+ "metadata": {},
1890
+ "outputs": [],
1891
+ "source": [
1892
+ "# # Track metrics here:\n",
1893
+ "# https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999"
1894
+ ]
1895
+ },
1896
+ {
1897
+ "cell_type": "markdown",
1898
+ "id": "23a54acc-1dce-4de4-9d5f-d0582f5097c5",
1899
+ "metadata": {},
1900
+ "source": [
1901
+ "**To tell if the model is working I'm looking at test_bwd/fwd_pct_correct and seeing if that is doing better than chance (1/batch_size)**"
1902
+ ]
1903
+ }
1904
+ ],
1905
+ "metadata": {
1906
+ "kernelspec": {
1907
+ "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]",
1908
+ "language": "python",
1909
+ "name": "conda_rt_mindeye2"
1910
+ },
1911
+ "language_info": {
1912
+ "codemirror_mode": {
1913
+ "name": "ipython",
1914
+ "version": 3
1915
+ },
1916
+ "file_extension": ".py",
1917
+ "mimetype": "text/x-python",
1918
+ "name": "python",
1919
+ "nbconvert_exporter": "python",
1920
+ "pygments_lexer": "ipython3",
1921
+ "version": "3.11.7"
1922
+ },
1923
+ "toc": {
1924
+ "base_numbering": 1,
1925
+ "nav_menu": {},
1926
+ "number_sections": true,
1927
+ "sideBar": true,
1928
+ "skip_h1_title": false,
1929
+ "title_cell": "Table of Contents",
1930
+ "title_sidebar": "Contents",
1931
+ "toc_cell": false,
1932
+ "toc_position": {
1933
+ "height": "calc(100% - 180px)",
1934
+ "left": "10px",
1935
+ "top": "150px",
1936
+ "width": "165px"
1937
+ },
1938
+ "toc_section_display": true,
1939
+ "toc_window_display": true
1940
+ },
1941
+ "toc-autonumbering": true,
1942
+ "vscode": {
1943
+ "interpreter": {
1944
+ "hash": "62aae01ef0cf7b6af841ab1c8ce59175c4332e693ab3d00bc32ceffb78a35376"
1945
+ }
1946
+ }
1947
+ },
1948
+ "nbformat": 4,
1949
+ "nbformat_minor": 5
1950
+ }
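The final markdown cell above judges the model by whether test_bwd/fwd_pct_correct beats chance, i.e. 1/batch_size for top-1 retrieval within a batch (about 4.2% with the test dataloader's batch size of 24). The following is a minimal, self-contained sketch of that chance-level baseline; it is not part of the notebook, and the batch size and embedding dimension are illustrative assumptions rather than the notebook's actual flattened CLIP dimension.

import torch
import torch.nn.functional as F

# Illustrative chance-level check: with random, unrelated embeddings,
# batchwise top-1 retrieval should hover around 1/batch_size.
torch.manual_seed(0)
batch_size, emb_dim = 24, 4096   # assumed sizes; test_dl above uses batch_size=24

brain_emb = F.normalize(torch.randn(batch_size, emb_dim), dim=-1)
image_emb = F.normalize(torch.randn(batch_size, emb_dim), dim=-1)

sims = brain_emb @ image_emb.T                 # cosine similarities (rows: brain, cols: image)
labels = torch.arange(batch_size)
fwd_top1 = (sims.argmax(dim=1) == labels).float().mean().item()   # brain -> image
bwd_top1 = (sims.argmax(dim=0) == labels).float().mean().item()   # image -> brain

print(f"chance level: {1/batch_size:.3f}")
print(f"random-embedding fwd top-1: {fwd_top1:.3f}, bwd top-1: {bwd_top1:.3f}")

A trained model should report test/test_fwd_pct_correct and test/test_bwd_pct_correct well above this baseline.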
modeling_git.py ADDED
@@ -0,0 +1,2050 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
3
+ # All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch GIT model."""
17
+
18
+
19
+ import math
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss
27
+
28
+ from transformers.activations import ACT2FN
29
+ from transformers.file_utils import ModelOutput
30
+ from transformers.modeling_outputs import (
31
+ BaseModelOutput,
32
+ BaseModelOutputWithPast,
33
+ BaseModelOutputWithPooling,
34
+ CausalLMOutputWithPast,
35
+ )
36
+ from transformers.modeling_utils import PreTrainedModel
37
+ from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
38
+ from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
39
+ from transformers.models.git.configuration_git import GitConfig, GitVisionConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ _CHECKPOINT_FOR_DOC = "microsoft/git-large"
45
+ _CONFIG_FOR_DOC = "GitConfig"
46
+
47
+ GIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
48
+ "microsoft/git-large",
49
+ # See all GIT models at https://huggingface.co/models?filter=git
50
+ ]
51
+
52
+
53
+ @dataclass
54
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Git
55
+ class GitVisionModelOutput(ModelOutput):
56
+ """
57
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
58
+
59
+ Args:
60
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
61
+ The image embeddings obtained by applying the projection layer to the pooler_output.
62
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
63
+ Sequence of hidden-states at the output of the last layer of the model.
64
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
65
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
66
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
67
+
68
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
69
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
70
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
71
+ sequence_length)`.
72
+
73
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
74
+ heads.
75
+ """
76
+
77
+ image_embeds: Optional[torch.FloatTensor] = None
78
+ last_hidden_state: torch.FloatTensor = None
79
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
80
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
81
+
82
+
83
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
84
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
85
+ """
86
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
87
+ """
88
+ bsz, src_len = mask.size()
89
+ tgt_len = tgt_len if tgt_len is not None else src_len
90
+
91
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
92
+
93
+ inverted_mask = 1.0 - expanded_mask
94
+
95
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
96
+
97
+
98
+ class GitEmbeddings(nn.Module):
99
+ """Construct the embeddings from word and position embeddings."""
100
+
101
+ def __init__(self, config):
102
+ super().__init__()
103
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
104
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
105
+
106
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
107
+ # any TensorFlow checkpoint file
108
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
109
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
110
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
111
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
112
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
113
+
114
+ def forward(
115
+ self,
116
+ input_ids: Optional[torch.LongTensor] = None,
117
+ position_ids: Optional[torch.LongTensor] = None,
118
+ inputs_embeds: Optional[torch.FloatTensor] = None,
119
+ past_key_values_length: int = 0,
120
+ ) -> torch.Tensor:
121
+ if input_ids is not None:
122
+ input_shape = input_ids.size()
123
+ else:
124
+ input_shape = inputs_embeds.size()[:-1]
125
+
126
+ seq_length = input_shape[1]
127
+
128
+ if position_ids is None:
129
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
130
+
131
+ if inputs_embeds is None:
132
+ embeddings = self.word_embeddings(input_ids)
133
+ else:
134
+ embeddings = inputs_embeds
135
+
136
+ if self.position_embedding_type == "absolute":
137
+ position_embeddings = self.position_embeddings(position_ids)
138
+ embeddings += position_embeddings
139
+ embeddings = self.LayerNorm(embeddings)
140
+ embeddings = self.dropout(embeddings)
141
+ return embeddings
142
+
143
+
144
+ class GitSelfAttention(nn.Module):
145
+ def __init__(self, config, position_embedding_type=None):
146
+ super().__init__()
147
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
148
+ raise ValueError(
149
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
150
+ f"heads ({config.num_attention_heads})"
151
+ )
152
+
153
+ self.num_attention_heads = config.num_attention_heads
154
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
155
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
156
+ self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
157
+ if config.num_image_with_embedding is not None:
158
+ self.image_patch_tokens *= config.num_image_with_embedding
159
+
160
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
161
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
162
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
163
+
164
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
165
+ self.position_embedding_type = position_embedding_type or getattr(
166
+ config, "position_embedding_type", "absolute"
167
+ )
168
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
169
+ self.max_position_embeddings = config.max_position_embeddings
170
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
171
+
172
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
173
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
174
+ x = x.view(new_x_shape)
175
+ return x.permute(0, 2, 1, 3)
176
+
177
+ def forward(
178
+ self,
179
+ hidden_states: torch.Tensor,
180
+ attention_mask: Optional[torch.FloatTensor] = None,
181
+ head_mask: Optional[torch.FloatTensor] = None,
182
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
183
+ output_attentions: Optional[bool] = False,
184
+ pixel_values_present: Optional[bool] = False,
185
+ ) -> Tuple[torch.Tensor]:
186
+ mixed_query_layer = self.query(hidden_states)
187
+
188
+ cutoff = self.image_patch_tokens if pixel_values_present else 0
189
+ if past_key_value is not None:
190
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
191
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
192
+ key_layer = torch.cat([key_layer[:, :, :cutoff, :], past_key_value[0], key_layer[:, :, -1:, :]], dim=2)
193
+ value_layer = torch.cat(
194
+ [value_layer[:, :, :cutoff, :], past_key_value[1], value_layer[:, :, -1:, :]], dim=2
195
+ )
196
+ else:
197
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
198
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
199
+
200
+ query_layer = self.transpose_for_scores(mixed_query_layer)
201
+
202
+ use_cache = past_key_value is not None
203
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
204
+ # Further calls to cross_attention layer can then reuse all cross-attention
205
+ # key/value_states (first "if" case)
206
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
207
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
208
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
209
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
210
+ # NOTE: like in other caches, we store the text component. In GIT it means we discard the image component.
211
+ past_key_value = (
212
+ key_layer[:, :, cutoff:, :],
213
+ value_layer[:, :, cutoff:, :],
214
+ )
215
+
216
+ # Take the dot product between "query" and "key" to get the raw attention scores.
217
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
218
+
219
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
220
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
221
+ if use_cache:
222
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
223
+ -1, 1
224
+ )
225
+ else:
226
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
227
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
228
+ distance = position_ids_l - position_ids_r
229
+
230
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
231
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
232
+
233
+ if self.position_embedding_type == "relative_key":
234
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
235
+ attention_scores = attention_scores + relative_position_scores
236
+ elif self.position_embedding_type == "relative_key_query":
237
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
238
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
239
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
240
+
241
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
242
+ if attention_mask is not None:
243
+ # Apply the attention mask (precomputed for all layers in the GitModel forward() function)
244
+ attention_scores = attention_scores + attention_mask
245
+
246
+ # Normalize the attention scores to probabilities.
247
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
248
+
249
+ # This is actually dropping out entire tokens to attend to, which might
250
+ # seem a bit unusual, but is taken from the original Transformer paper.
251
+ attention_probs = self.dropout(attention_probs)
252
+
253
+ # Mask heads if we want to
254
+ if head_mask is not None:
255
+ attention_probs = attention_probs * head_mask
256
+
257
+ context_layer = torch.matmul(attention_probs, value_layer)
258
+
259
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
260
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
261
+ context_layer = context_layer.view(new_context_layer_shape)
262
+
263
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
264
+
265
+ outputs = outputs + (past_key_value,)
266
+ return outputs
267
+
268
+
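The cache handling above is worth spelling out: `past_key_value` holds only the text portion of the key/value tensors, so whenever images are present the image keys/values are recomputed from `hidden_states` at every decoding step and concatenated back in front of the cached text keys/values. A minimal sketch with made-up shapes (the 197-token image length, 12 heads, and head size 64 are illustrative assumptions, not values from any config):

```python
import torch

# Illustrative shapes only.
batch, heads, image_tokens, cached_text, head_size = 1, 12, 197, 5, 64

# What GitSelfAttention stores in `past_key_value`: text-only keys (and values).
past_key = torch.randn(batch, heads, cached_text, head_size)

# At the next step the key projection covers the image tokens plus one new text
# token; the cached text keys are stitched back in between, as in the forward above.
new_key = torch.randn(batch, heads, image_tokens + 1, head_size)
cutoff = image_tokens  # plays the role of self.image_patch_tokens
key_layer = torch.cat([new_key[:, :, :cutoff, :], past_key, new_key[:, :, -1:, :]], dim=2)
assert key_layer.shape[2] == image_tokens + cached_text + 1
```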
269
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
270
+ class GitSelfOutput(nn.Module):
271
+ def __init__(self, config):
272
+ super().__init__()
273
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
274
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
275
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
276
+
277
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
278
+ hidden_states = self.dense(hidden_states)
279
+ hidden_states = self.dropout(hidden_states)
280
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
281
+ return hidden_states
282
+
283
+
284
+ class GitAttention(nn.Module):
285
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.__init__ with Bert->Git
286
+ def __init__(self, config, position_embedding_type=None):
287
+ super().__init__()
288
+ self.self = GitSelfAttention(config, position_embedding_type=position_embedding_type)
289
+ self.output = GitSelfOutput(config)
290
+ self.pruned_heads = set()
291
+
292
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
293
+ def prune_heads(self, heads):
294
+ if len(heads) == 0:
295
+ return
296
+ heads, index = find_pruneable_heads_and_indices(
297
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
298
+ )
299
+
300
+ # Prune linear layers
301
+ self.self.query = prune_linear_layer(self.self.query, index)
302
+ self.self.key = prune_linear_layer(self.self.key, index)
303
+ self.self.value = prune_linear_layer(self.self.value, index)
304
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
305
+
306
+ # Update hyper params and store pruned heads
307
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
308
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
309
+ self.pruned_heads = self.pruned_heads.union(heads)
310
+
311
+ def forward(
312
+ self,
313
+ hidden_states: torch.Tensor,
314
+ attention_mask: Optional[torch.FloatTensor] = None,
315
+ head_mask: Optional[torch.FloatTensor] = None,
316
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
317
+ output_attentions: Optional[bool] = False,
318
+ pixel_values_present: Optional[bool] = False,
319
+ ) -> Tuple[torch.Tensor]:
320
+ self_outputs = self.self(
321
+ hidden_states,
322
+ attention_mask,
323
+ head_mask,
324
+ past_key_value,
325
+ output_attentions,
326
+ pixel_values_present,
327
+ )
328
+ attention_output = self.output(self_outputs[0], hidden_states)
329
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
330
+ return outputs
331
+
332
+
333
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
334
+ class GitIntermediate(nn.Module):
335
+ def __init__(self, config):
336
+ super().__init__()
337
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
338
+ if isinstance(config.hidden_act, str):
339
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
340
+ else:
341
+ self.intermediate_act_fn = config.hidden_act
342
+
343
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
344
+ hidden_states = self.dense(hidden_states)
345
+ hidden_states = self.intermediate_act_fn(hidden_states)
346
+ return hidden_states
347
+
348
+
349
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
350
+ class GitOutput(nn.Module):
351
+ def __init__(self, config):
352
+ super().__init__()
353
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
354
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
355
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
356
+
357
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
358
+ hidden_states = self.dense(hidden_states)
359
+ hidden_states = self.dropout(hidden_states)
360
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
361
+ return hidden_states
362
+
363
+
364
+ class GitLayer(nn.Module):
365
+ def __init__(self, config):
366
+ super().__init__()
367
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
368
+ self.seq_len_dim = 1
369
+ self.attention = GitAttention(config)
370
+ self.intermediate = GitIntermediate(config)
371
+ self.output = GitOutput(config)
372
+
373
+ def forward(
374
+ self,
375
+ hidden_states: torch.Tensor,
376
+ attention_mask: Optional[torch.FloatTensor] = None,
377
+ head_mask: Optional[torch.FloatTensor] = None,
378
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
379
+ output_attentions: Optional[bool] = False,
380
+ pixel_values_present: Optional[bool] = False,
381
+ ) -> Tuple[torch.Tensor]:
382
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
383
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
384
+ self_attention_outputs = self.attention(
385
+ hidden_states,
386
+ attention_mask,
387
+ head_mask,
388
+ output_attentions=output_attentions,
389
+ past_key_value=self_attn_past_key_value,
390
+ pixel_values_present=pixel_values_present,
391
+ )
392
+ attention_output = self_attention_outputs[0]
393
+
394
+ # if decoder, the last output is tuple of self-attn cache
395
+ outputs = self_attention_outputs[1:-1]
396
+ present_key_value = self_attention_outputs[-1]
397
+
398
+ layer_output = apply_chunking_to_forward(
399
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
400
+ )
401
+ outputs = (layer_output,) + outputs
402
+
403
+ # if decoder, return the attn key/values as the last output
404
+ outputs = outputs + (present_key_value,)
405
+
406
+ return outputs
407
+
408
+ def feed_forward_chunk(self, attention_output):
409
+ intermediate_output = self.intermediate(attention_output)
410
+ layer_output = self.output(intermediate_output, attention_output)
411
+ return layer_output
412
+
413
+
414
+ class GitEncoder(nn.Module):
415
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Git
416
+ def __init__(self, config):
417
+ super().__init__()
418
+ self.config = config
419
+ self.layer = nn.ModuleList([GitLayer(config) for _ in range(config.num_hidden_layers)])
420
+ self.gradient_checkpointing = False
421
+
422
+ def forward(
423
+ self,
424
+ hidden_states: torch.Tensor,
425
+ attention_mask: Optional[torch.FloatTensor] = None,
426
+ head_mask: Optional[torch.FloatTensor] = None,
427
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
428
+ use_cache: Optional[bool] = None,
429
+ output_attentions: Optional[bool] = False,
430
+ output_hidden_states: Optional[bool] = False,
431
+ pixel_values_present: Optional[bool] = False,
432
+ return_dict: Optional[bool] = True,
433
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
434
+ if self.gradient_checkpointing and self.training:
435
+ if use_cache:
436
+ logger.warning_once(
437
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
438
+ )
439
+ use_cache = False
440
+
441
+ all_hidden_states = () if output_hidden_states else None
442
+ all_self_attentions = () if output_attentions else None
443
+
444
+ next_decoder_cache = () if use_cache else None
445
+ for i, layer_module in enumerate(self.layer):
446
+ if output_hidden_states:
447
+ all_hidden_states = all_hidden_states + (hidden_states,)
448
+
449
+ layer_head_mask = head_mask[i] if head_mask is not None else None
450
+ past_key_value = past_key_values[i] if past_key_values is not None else None
451
+
452
+ if self.gradient_checkpointing and self.training:
453
+
454
+ def create_custom_forward(module):
455
+ def custom_forward(*inputs):
456
+ return module(*inputs, past_key_value, output_attentions)
457
+
458
+ return custom_forward
459
+
460
+ layer_outputs = torch.utils.checkpoint.checkpoint(
461
+ create_custom_forward(layer_module),
462
+ hidden_states,
463
+ attention_mask,
464
+ layer_head_mask,
465
+ )
466
+ else:
467
+ layer_outputs = layer_module(
468
+ hidden_states,
469
+ attention_mask,
470
+ layer_head_mask,
471
+ past_key_value,
472
+ output_attentions,
473
+ pixel_values_present,
474
+ )
475
+
476
+ hidden_states = layer_outputs[0]
477
+ if use_cache:
478
+ next_decoder_cache += (layer_outputs[-1],)
479
+ if output_attentions:
480
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
481
+
482
+ if output_hidden_states:
483
+ all_hidden_states = all_hidden_states + (hidden_states,)
484
+
485
+ if not return_dict:
486
+ return tuple(
487
+ v
488
+ for v in [
489
+ hidden_states,
490
+ next_decoder_cache,
491
+ all_hidden_states,
492
+ all_self_attentions,
493
+ ]
494
+ if v is not None
495
+ )
496
+ return BaseModelOutputWithPast(
497
+ last_hidden_state=hidden_states,
498
+ past_key_values=next_decoder_cache,
499
+ hidden_states=all_hidden_states,
500
+ attentions=all_self_attentions,
501
+ )
502
+
503
+
504
+ class GitPreTrainedModel(PreTrainedModel):
505
+ """
506
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
507
+ models.
508
+ """
509
+
510
+ config_class = GitConfig
511
+ base_model_prefix = "git"
512
+ supports_gradient_checkpointing = True
513
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
514
+
515
+ def _init_weights(self, module):
516
+ """Initialize the weights"""
517
+ if isinstance(module, GitVisionEmbeddings):
518
+ nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range)
519
+ nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range)
520
+ nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range)
521
+ if isinstance(module, nn.Linear):
522
+ # Slightly different from the TF version which uses truncated_normal for initialization
523
+ # cf https://github.com/pytorch/pytorch/pull/5617
524
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
525
+ if module.bias is not None:
526
+ module.bias.data.zero_()
527
+ elif isinstance(module, nn.Embedding):
528
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
529
+ if module.padding_idx is not None:
530
+ module.weight.data[module.padding_idx].zero_()
531
+ elif isinstance(module, nn.LayerNorm):
532
+ module.bias.data.zero_()
533
+ module.weight.data.fill_(1.0)
534
+
535
+ def _set_gradient_checkpointing(self, module, value=False):
536
+ if isinstance(module, (GitEncoder, GitVisionEncoder)):
537
+ module.gradient_checkpointing = value
538
+
539
+
540
+ GIT_START_DOCSTRING = r"""
541
+
542
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
543
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
544
+ etc.)
545
+
546
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
547
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
548
+ and behavior.
549
+
550
+ Parameters:
551
+ config ([`GitConfig`]): Model configuration class with all the parameters of the model.
552
+ Initializing with a config file does not load the weights associated with the model, only the
553
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
554
+ """
555
+
556
+ GIT_INPUTS_DOCSTRING = r"""
557
+ Args:
558
+ input_ids (`torch.LongTensor` of shape `({0})`):
559
+ Indices of input sequence tokens in the vocabulary.
560
+
561
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
562
+ [`PreTrainedTokenizer.__call__`] for details.
563
+
564
+ [What are input IDs?](../glossary#input-ids)
565
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
566
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
567
+
568
+ - 1 for tokens that are **not masked**,
569
+ - 0 for tokens that are **masked**.
570
+
571
+ [What are attention masks?](../glossary#attention-mask)
572
+
573
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
574
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
575
+ config.max_position_embeddings - 1]`.
576
+
577
+ [What are position IDs?](../glossary#position-ids)
578
+
579
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
580
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
581
+ [`CLIPImageProcessor.__call__`] for details.
582
+
583
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
584
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
585
+
586
+ - 1 indicates the head is **not masked**,
587
+ - 0 indicates the head is **masked**.
588
+
589
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
590
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
591
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
592
+ model's internal embedding lookup matrix.
593
+ output_attentions (`bool`, *optional*):
594
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
595
+ tensors for more detail.
596
+ output_hidden_states (`bool`, *optional*):
597
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
598
+ more detail.
599
+ return_dict (`bool`, *optional*):
600
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
601
+ """
602
+
603
+
604
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Git
605
+ class GitVisionEmbeddings(nn.Module):
606
+ def __init__(self, config: GitVisionConfig):
607
+ super().__init__()
608
+ self.config = config
609
+ self.embed_dim = config.hidden_size
610
+ self.image_size = config.image_size
611
+ self.patch_size = config.patch_size
612
+
613
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
614
+
615
+ self.patch_embedding = nn.Conv2d(
616
+ in_channels=config.num_channels,
617
+ out_channels=self.embed_dim,
618
+ kernel_size=self.patch_size,
619
+ stride=self.patch_size,
620
+ bias=False,
621
+ )
622
+
623
+ self.num_patches = (self.image_size // self.patch_size) ** 2
624
+ self.num_positions = self.num_patches + 1
625
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
626
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)))
627
+
628
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
629
+ batch_size = pixel_values.shape[0]
630
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid]
631
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
632
+
633
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
634
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
635
+ embeddings = embeddings + self.position_embedding(self.position_ids)
636
+ return embeddings
637
+
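As a quick dimensional check on the embedding layer above: one class token is prepended to a square grid of patch tokens. The 224-pixel image size and 14-pixel patch size below are assumed values (typical of the ViT-L/14 vision towers paired with GIT), not read from this file:

```python
# Assumed values, for illustration only.
image_size, patch_size = 224, 14
num_patches = (image_size // patch_size) ** 2  # 16 * 16 = 256 patch tokens
num_positions = num_patches + 1                # plus the class token = 257
print(num_patches, num_positions)              # 256 257
```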
638
+
639
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP
640
+ class GitVisionMLP(nn.Module):
641
+ def __init__(self, config):
642
+ super().__init__()
643
+ self.config = config
644
+ self.activation_fn = ACT2FN[config.hidden_act]
645
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
646
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
647
+
648
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
649
+ hidden_states = self.fc1(hidden_states)
650
+ hidden_states = self.activation_fn(hidden_states)
651
+ hidden_states = self.fc2(hidden_states)
652
+ return hidden_states
653
+
654
+
655
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention
656
+ class GitVisionAttention(nn.Module):
657
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
658
+
659
+ def __init__(self, config):
660
+ super().__init__()
661
+ self.config = config
662
+ self.embed_dim = config.hidden_size
663
+ self.num_heads = config.num_attention_heads
664
+ self.head_dim = self.embed_dim // self.num_heads
665
+ if self.head_dim * self.num_heads != self.embed_dim:
666
+ raise ValueError(
667
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
668
+ f" {self.num_heads})."
669
+ )
670
+ self.scale = self.head_dim**-0.5
671
+ self.dropout = config.attention_dropout
672
+
673
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
674
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
675
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
676
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
677
+
678
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
679
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
680
+
681
+ def forward(
682
+ self,
683
+ hidden_states: torch.Tensor,
684
+ attention_mask: Optional[torch.Tensor] = None,
685
+ causal_attention_mask: Optional[torch.Tensor] = None,
686
+ output_attentions: Optional[bool] = False,
687
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
688
+ """Input shape: Batch x Time x Channel"""
689
+
690
+ bsz, tgt_len, embed_dim = hidden_states.size()
691
+
692
+ # get query proj
693
+ query_states = self.q_proj(hidden_states) * self.scale
694
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
695
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
696
+
697
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
698
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
699
+ key_states = key_states.view(*proj_shape)
700
+ value_states = value_states.view(*proj_shape)
701
+
702
+ src_len = key_states.size(1)
703
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
704
+
705
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
706
+ raise ValueError(
707
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
708
+ f" {attn_weights.size()}"
709
+ )
710
+
711
+ # apply the causal_attention_mask first
712
+ if causal_attention_mask is not None:
713
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
714
+ raise ValueError(
715
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
716
+ f" {causal_attention_mask.size()}"
717
+ )
718
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
719
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
720
+
721
+ if attention_mask is not None:
722
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
723
+ raise ValueError(
724
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
725
+ )
726
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
727
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
728
+
729
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
730
+
731
+ if output_attentions:
732
+ # this operation is a bit awkward, but it's required to
733
+ # make sure that attn_weights keeps its gradient.
734
+ # In order to do so, attn_weights have to be reshaped
735
+ # twice and have to be reused in the following
736
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
737
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
738
+ else:
739
+ attn_weights_reshaped = None
740
+
741
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
742
+
743
+ attn_output = torch.bmm(attn_probs, value_states)
744
+
745
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
746
+ raise ValueError(
747
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
748
+ f" {attn_output.size()}"
749
+ )
750
+
751
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
752
+ attn_output = attn_output.transpose(1, 2)
753
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
754
+
755
+ attn_output = self.out_proj(attn_output)
756
+
757
+ return attn_output, attn_weights_reshaped
758
+
759
+
760
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GitVision
761
+ class GitVisionEncoderLayer(nn.Module):
762
+ def __init__(self, config: GitVisionConfig):
763
+ super().__init__()
764
+ self.embed_dim = config.hidden_size
765
+ self.self_attn = GitVisionAttention(config)
766
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
767
+ self.mlp = GitVisionMLP(config)
768
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
769
+
770
+ def forward(
771
+ self,
772
+ hidden_states: torch.Tensor,
773
+ attention_mask: torch.Tensor,
774
+ causal_attention_mask: torch.Tensor,
775
+ output_attentions: Optional[bool] = False,
776
+ ) -> Tuple[torch.FloatTensor]:
777
+ """
778
+ Args:
779
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
780
+ attention_mask (`torch.FloatTensor`): attention mask of size
781
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
782
783
+ output_attentions (`bool`, *optional*):
784
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
785
+ returned tensors for more detail.
786
+ """
787
+ residual = hidden_states
788
+
789
+ hidden_states = self.layer_norm1(hidden_states)
790
+ hidden_states, attn_weights = self.self_attn(
791
+ hidden_states=hidden_states,
792
+ attention_mask=attention_mask,
793
+ causal_attention_mask=causal_attention_mask,
794
+ output_attentions=output_attentions,
795
+ )
796
+ hidden_states = residual + hidden_states
797
+
798
+ residual = hidden_states
799
+ hidden_states = self.layer_norm2(hidden_states)
800
+ hidden_states = self.mlp(hidden_states)
801
+ hidden_states = residual + hidden_states
802
+
803
+ outputs = (hidden_states,)
804
+
805
+ if output_attentions:
806
+ outputs += (attn_weights,)
807
+
808
+ return outputs
809
+
810
+
811
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->GitVision, CLIPConfig
812
+ class GitVisionEncoder(nn.Module):
813
+ """
814
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
815
+ [`GitVisionEncoderLayer`].
816
+
817
+ Args:
818
+ config: GitVisionConfig
819
+ """
820
+
821
+ def __init__(self, config: GitVisionConfig):
822
+ super().__init__()
823
+ self.config = config
824
+ self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
825
+ self.gradient_checkpointing = False
826
+
827
+ def forward(
828
+ self,
829
+ inputs_embeds,
830
+ attention_mask: Optional[torch.Tensor] = None,
831
+ causal_attention_mask: Optional[torch.Tensor] = None,
832
+ output_attentions: Optional[bool] = None,
833
+ output_hidden_states: Optional[bool] = None,
834
+ return_dict: Optional[bool] = None,
835
+ ) -> Union[Tuple, BaseModelOutput]:
836
+ r"""
837
+ Args:
838
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
839
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
840
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
841
+ than the model's internal embedding lookup matrix.
842
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
843
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
844
+
845
+ - 1 for tokens that are **not masked**,
846
+ - 0 for tokens that are **masked**.
847
+
848
+ [What are attention masks?](../glossary#attention-mask)
849
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
850
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
851
+
852
+ - 1 for tokens that are **not masked**,
853
+ - 0 for tokens that are **masked**.
854
+
855
+ [What are attention masks?](../glossary#attention-mask)
856
+ output_attentions (`bool`, *optional*):
857
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
858
+ returned tensors for more detail.
859
+ output_hidden_states (`bool`, *optional*):
860
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
861
+ for more detail.
862
+ return_dict (`bool`, *optional*):
863
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
864
+ """
865
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
866
+ output_hidden_states = (
867
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
868
+ )
869
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
870
+
871
+ encoder_states = () if output_hidden_states else None
872
+ all_attentions = () if output_attentions else None
873
+
874
+ hidden_states = inputs_embeds
875
+ for idx, encoder_layer in enumerate(self.layers):
876
+ if output_hidden_states:
877
+ encoder_states = encoder_states + (hidden_states,)
878
+ if self.gradient_checkpointing and self.training:
879
+
880
+ def create_custom_forward(module):
881
+ def custom_forward(*inputs):
882
+ return module(*inputs, output_attentions)
883
+
884
+ return custom_forward
885
+
886
+ layer_outputs = torch.utils.checkpoint.checkpoint(
887
+ create_custom_forward(encoder_layer),
888
+ hidden_states,
889
+ attention_mask,
890
+ causal_attention_mask,
891
+ )
892
+ else:
893
+ layer_outputs = encoder_layer(
894
+ hidden_states,
895
+ attention_mask,
896
+ causal_attention_mask,
897
+ output_attentions=output_attentions,
898
+ )
899
+
900
+ hidden_states = layer_outputs[0]
901
+
902
+ if output_attentions:
903
+ all_attentions = all_attentions + (layer_outputs[1],)
904
+
905
+ if output_hidden_states:
906
+ encoder_states = encoder_states + (hidden_states,)
907
+
908
+ if not return_dict:
909
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
910
+ return BaseModelOutput(
911
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
912
+ )
913
+
914
+
915
+ GIT_VISION_INPUTS_DOCSTRING = r"""
916
+ Args:
917
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
918
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
919
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
920
+ output_attentions (`bool`, *optional*):
921
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
922
+ tensors for more detail.
923
+ output_hidden_states (`bool`, *optional*):
924
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
925
+ more detail.
926
+ return_dict (`bool`, *optional*):
927
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
928
+ """
929
+
930
+
931
+ class GitVisionTransformer(nn.Module):
932
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPEncoder->GitVisionEncoder, CLIP->Git
933
+ def __init__(self, config: GitVisionConfig):
934
+ super().__init__()
935
+ self.config = config
936
+ embed_dim = config.hidden_size
937
+
938
+ self.embeddings = GitVisionEmbeddings(config)
939
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
940
+ self.encoder = GitVisionEncoder(config)
941
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
942
+
943
+ @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
944
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
945
+ def forward(
946
+ self,
947
+ pixel_values: Optional[torch.FloatTensor] = None,
948
+ output_attentions: Optional[bool] = None,
949
+ output_hidden_states: Optional[bool] = None,
950
+ return_dict: Optional[bool] = None,
951
+ ) -> Union[Tuple, BaseModelOutput]:
952
+ r"""
953
+ Returns:
954
+
955
+ """
956
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
957
+ output_hidden_states = (
958
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
959
+ )
960
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
961
+
962
+ if pixel_values is None:
963
+ raise ValueError("You have to specify pixel_values")
964
+
965
+ hidden_states = self.embeddings(pixel_values)
966
+ hidden_states = self.pre_layrnorm(hidden_states)
967
+
968
+ encoder_outputs = self.encoder(
969
+ inputs_embeds=hidden_states,
970
+ output_attentions=output_attentions,
971
+ output_hidden_states=output_hidden_states,
972
+ return_dict=return_dict,
973
+ )
974
+
975
+ last_hidden_state = encoder_outputs[0]
976
+
977
+ last_hidden_state = self.post_layernorm(last_hidden_state)
978
+
979
+ if not return_dict:
980
+ return (last_hidden_state,) + encoder_outputs[1:]
981
+
982
+ return BaseModelOutput(
983
+ last_hidden_state=last_hidden_state,
984
+ hidden_states=encoder_outputs.hidden_states,
985
+ attentions=encoder_outputs.attentions,
986
+ )
987
+
988
+
989
+ @add_start_docstrings(
990
+ """The vision model from CLIP, used in GIT, without any head or projection on top.""",
991
+ GIT_START_DOCSTRING,
992
+ )
993
+ class GitVisionModel(GitPreTrainedModel):
994
+ config_class = GitVisionConfig
995
+ main_input_name = "pixel_values"
996
+
997
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git
998
+ def __init__(self, config: GitVisionConfig):
999
+ super().__init__(config)
1000
+ self.vision_model = GitVisionTransformer(config)
1001
+ # Initialize weights and apply final processing
1002
+ self.post_init()
1003
+
1004
+ def get_input_embeddings(self) -> nn.Module:
1005
+ return self.vision_model.embeddings.patch_embedding
1006
+
1007
+ @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
1008
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
1009
+ def forward(
1010
+ self,
1011
+ pixel_values: Optional[torch.FloatTensor] = None,
1012
+ output_attentions: Optional[bool] = None,
1013
+ output_hidden_states: Optional[bool] = None,
1014
+ return_dict: Optional[bool] = None,
1015
+ ) -> Union[Tuple, BaseModelOutput]:
1016
+ r"""
1017
+ Returns:
1018
+
1019
+ Examples:
1020
+
1021
+ ```python
1022
+ >>> from PIL import Image
1023
+ >>> import requests
1024
+ >>> from transformers import AutoProcessor, GitVisionModel
1025
+
1026
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large")
1027
+ >>> model = GitVisionModel.from_pretrained("microsoft/git-large")
1028
+
1029
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1030
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1031
+
1032
+ >>> inputs = processor(images=image, return_tensors="pt")
1033
+
1034
+ >>> outputs = model(**inputs)
1035
+ >>> last_hidden_state = outputs.last_hidden_state
1036
+ ```"""
1037
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1038
+
1039
+ return self.vision_model(
1040
+ pixel_values=pixel_values,
1041
+ output_attentions=output_attentions,
1042
+ output_hidden_states=output_hidden_states,
1043
+ return_dict=return_dict,
1044
+ )
1045
+
1046
+
1047
+ class GitProjection(nn.Module):
1048
+ def __init__(self, config: GitConfig):
1049
+ super().__init__()
1050
+ self.config = config
1051
+ self.visual_projection = nn.Sequential(
1052
+ nn.Linear(config.vision_config.hidden_size, config.hidden_size),
1053
+ nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps),
1054
+ )
1055
+
1056
+ def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
1057
+ return self.visual_projection(embeddings)
1058
+
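Dimensionally, `GitProjection` just maps CLIP vision features into the text decoder's hidden size. A small sketch (the 1024 and 768 sizes are assumptions chosen to match a CLIP ViT-L vision tower feeding a BERT-sized decoder, not values read from this file):

```python
import torch
import torch.nn as nn

# Assumed sizes: CLIP vision hidden size 1024 -> GIT text hidden size 768.
vision_hidden, text_hidden = 1024, 768
visual_projection = nn.Sequential(
    nn.Linear(vision_hidden, text_hidden),
    nn.LayerNorm(text_hidden, eps=1e-5),
)

clip_features = torch.randn(2, 257, vision_hidden)  # (batch, image tokens, dim)
print(visual_projection(clip_features).shape)       # torch.Size([2, 257, 768])
```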
1059
+
1060
+ @add_start_docstrings(
1061
+ "The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states"
1062
+ " without any specific head on top.",
1063
+ GIT_START_DOCSTRING,
1064
+ )
1065
+ class GitModel(GitPreTrainedModel):
1066
+ def __init__(self, config):
1067
+ super().__init__(config)
1068
+ self.config = config
1069
+
1070
+ self.embeddings = GitEmbeddings(config)
1071
+ self.image_encoder = GitVisionModel(config.vision_config)
1072
+ self.encoder = GitEncoder(config)
1073
+
1074
+ self.visual_projection = GitProjection(config)
1075
+
1076
+ if config.num_image_with_embedding is not None:
1077
+ self.img_temperal_embedding = nn.ParameterList(
1078
+ nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
1079
+ for _ in range(config.num_image_with_embedding)
1080
+ )
1081
+
1082
+ # Initialize weights and apply final processing
1083
+ self.post_init()
1084
+
1085
+ def get_input_embeddings(self):
1086
+ return self.embeddings.word_embeddings
1087
+
1088
+ def set_input_embeddings(self, value):
1089
+ self.embeddings.word_embeddings = value
1090
+
1091
+ def _prune_heads(self, heads_to_prune):
1092
+ """
1093
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1094
+ class PreTrainedModel
1095
+ """
1096
+ for layer, heads in heads_to_prune.items():
1097
+ self.encoder.layer[layer].attention.prune_heads(heads)
1098
+
1099
+ def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
1100
+ # Default mask is for forward direction. Flip for backward direction.
1101
+ mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
1102
+ mask = mask.masked_fill(mask == 1, float("-inf"))
1103
+ return mask
1104
+
1105
+ def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
1106
+ num_tgt = tgt.shape[1]
1107
+ num_memory = memory.shape[1]
1108
+ device = tgt.device
1109
+ dtype = tgt.dtype
1110
+ top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
1111
+ top_right = torch.full(
1112
+ (num_memory, num_tgt + past_key_values_length),
1113
+ float("-inf"),
1114
+ device=tgt.device,
1115
+ dtype=dtype,
1116
+ )
1117
+ bottom_left = torch.zeros(
1118
+ (num_tgt, num_memory),
1119
+ dtype=dtype,
1120
+ device=tgt_mask.device,
1121
+ )
1122
+
1123
+ if past_key_values_length > 0:
1124
+ tgt_mask = torch.zeros(
1125
+ (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length),
1126
+ dtype=dtype,
1127
+ device=tgt_mask.device,
1128
+ )
1129
+
1130
+ left = torch.cat((top_left, bottom_left), dim=0)
1131
+ right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
1132
+
1133
+ full_attention_mask = torch.cat((left, right), dim=1)[None, :]
1134
+
1135
+ if memory_key_padding_mask is None:
1136
+ memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
1137
+ # False means the position is valid, i.e. it is not padding
1138
+ if memory_key_padding_mask.dtype != torch.bool:
1139
+ raise ValueError("Memory key padding mask must be a boolean tensor.")
1140
+ zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
1141
+ zero_negative_infinity[memory_key_padding_mask] = float("-inf")
1142
+ full_attention_mask = full_attention_mask.expand(
1143
+ (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)
1144
+ )
1145
+ full_attention_mask = full_attention_mask.clone()
1146
+ origin_left = full_attention_mask[:, :, :num_memory]
1147
+ update = zero_negative_infinity[:, None, :]
1148
+ full_attention_mask[:, :, :num_memory] = origin_left + update
1149
+
1150
+ # add axis for multi-head
1151
+ full_attention_mask = full_attention_mask[:, None, :, :]
1152
+
1153
+ return full_attention_mask
1154
+
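The mask built by `create_attention_mask` has a simple block structure: image ("memory") tokens attend bidirectionally to each other and never to text, while text tokens attend to all image tokens and causally to earlier text. A toy reconstruction of that pattern with tiny, made-up sizes (no cache, no padding):

```python
import torch

num_memory, num_tgt = 3, 2  # 3 image tokens, 2 text tokens (illustrative only)

# Causal part for the text tokens, same recipe as _generate_future_mask above.
tgt_mask = torch.triu(torch.ones(num_tgt, num_tgt), diagonal=1)
tgt_mask = tgt_mask.masked_fill(tgt_mask == 1, float("-inf"))

top = torch.cat(
    [torch.zeros(num_memory, num_memory),                       # image -> image: allowed
     torch.full((num_memory, num_tgt), float("-inf"))], dim=1)  # image -> text: blocked
bottom = torch.cat(
    [torch.zeros(num_tgt, num_memory), tgt_mask], dim=1)        # text -> image, causal text
print(torch.cat([top, bottom], dim=0))
```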
1155
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1156
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
1157
+ def forward(
1158
+ self,
1159
+ input_ids: Optional[torch.Tensor] = None,
1160
+ attention_mask: Optional[torch.Tensor] = None,
1161
+ position_ids: Optional[torch.Tensor] = None,
1162
+ pixel_values: Optional[torch.Tensor] = None,
1163
+ head_mask: Optional[torch.Tensor] = None,
1164
+ inputs_embeds: Optional[torch.Tensor] = None,
1165
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1166
+ use_cache: Optional[bool] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
1171
+ r"""
1172
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1173
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1174
+
1175
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1176
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1177
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1178
+ use_cache (`bool`, *optional*):
1179
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1180
+ `past_key_values`).
1181
+
1182
+ Returns:
1183
+
1184
+ Examples:
1185
+
1186
+ ```python
1187
+ >>> from transformers import AutoProcessor, AutoModel
1188
+ >>> import requests
1189
+ >>> from PIL import Image
1190
+
1191
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large")
1192
+ >>> model = AutoModel.from_pretrained("microsoft/git-large")
1193
+
1194
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1195
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1196
+
1197
+ >>> text = "this is an image of two cats"
1198
+
1199
+ >>> inputs = processor(text, images=image, return_tensors="pt")
1200
+
1201
+ >>> outputs = model(**inputs)
1202
+ >>> last_hidden_state = outputs.last_hidden_state
1203
+ ```"""
1204
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1205
+ output_hidden_states = (
1206
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1207
+ )
1208
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1209
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1210
+
1211
+ if input_ids is not None and inputs_embeds is not None:
1212
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1213
+ elif input_ids is not None:
1214
+ input_shape = input_ids.size()
1215
+ elif inputs_embeds is not None:
1216
+ input_shape = inputs_embeds.size()[:-1]
1217
+ else:
1218
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1219
+
1220
+ seq_length = input_shape[1]
1221
+
1222
+ # past_key_values_length
1223
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1224
+
1225
+ # Prepare head mask if needed
1226
+ # 1.0 in head_mask indicate we keep the head
1227
+ # attention_probs has shape bsz x n_heads x N x N
1228
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1229
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1230
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1231
+
1232
+ projected_visual_features = None
1233
+
1234
+ # print(input_ids)
1235
+ if pixel_values is not None:
1236
+ if pixel_values.ndim == 4:
1237
+ # here we assume pixel_values is of shape (batch_size, num_channels, height, width)
1238
+ visual_features = self.image_encoder(pixel_values).last_hidden_state
1239
+
1240
+ elif pixel_values.ndim == 5:
1241
+ # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width)
1242
+ visual_features = []
1243
+ for frame_idx in range(pixel_values.shape[1]):
1244
+ visual_features_frame = self.image_encoder(pixel_values[:, frame_idx, :, :]).last_hidden_state
1245
+ visual_features_frame += self.img_temperal_embedding[frame_idx]
1246
+ visual_features.append(visual_features_frame)
1247
+
1248
+ # finally, concatenate all features along sequence dimension
1249
+ visual_features = torch.cat(visual_features, dim=1)
1250
+
1251
+ else:
1252
+ raise ValueError("pixel_values must be of rank 4 or 5")
1253
+
1254
+ projected_visual_features = self.visual_projection(visual_features)
1255
+
1256
+ embedding_output = self.embeddings(
1257
+ input_ids=input_ids,
1258
+ position_ids=position_ids,
1259
+ inputs_embeds=inputs_embeds,
1260
+ past_key_values_length=past_key_values_length,
1261
+ )
1262
+
1263
+ if projected_visual_features is None:
1264
+ projected_visual_features = torch.zeros(
1265
+ (embedding_output.shape[0], 0, embedding_output.shape[2]),
1266
+ dtype=embedding_output.dtype,
1267
+ device=embedding_output.device,
1268
+ )
1269
+
1270
+ # Repeat visual features to match embedding batch size.
1271
+ projected_visual_features = projected_visual_features.repeat(
1272
+ embedding_output.size(0) // projected_visual_features.size(0), 1, 1
1273
+ )
1274
+
1275
+ # concatenate patch token and text token embeddings
1276
+ hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)
1277
+
1278
+ # By default, an additive causal mask is created
1279
+ # for masking the future (one direction).
1280
+ tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)
1281
+
1282
+ # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len)
1283
+ combined_attention_mask = self.create_attention_mask(
1284
+ tgt=embedding_output,
1285
+ memory=projected_visual_features,
1286
+ tgt_mask=tgt_mask,
1287
+ past_key_values_length=past_key_values_length,
1288
+ )
1289
+
1290
+ if attention_mask is not None:
1291
+ # if the user provides an attention mask, we add it to the default one
1292
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1293
+ expanded_attn_mask = _expand_mask(attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]).to(
1294
+ embedding_output.device
1295
+ )
1296
+ if past_key_values_length > 0:
1297
+ expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
1298
+ else:
1299
+ combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask
1300
+
1301
+ encoder_outputs = self.encoder(
1302
+ hidden_states,
1303
+ attention_mask=combined_attention_mask,
1304
+ head_mask=head_mask,
1305
+ past_key_values=past_key_values,
1306
+ use_cache=use_cache,
1307
+ output_attentions=output_attentions,
1308
+ output_hidden_states=output_hidden_states,
1309
+ return_dict=return_dict,
1310
+ pixel_values_present=pixel_values is not None,
1311
+ )
1312
+ sequence_output = encoder_outputs[0]
1313
+
1314
+ if not return_dict:
1315
+ return (sequence_output,) + encoder_outputs[1:]
1316
+
1317
+ return BaseModelOutputWithPast(
1318
+ last_hidden_state=sequence_output,
1319
+ past_key_values=encoder_outputs.past_key_values,
1320
+ hidden_states=encoder_outputs.hidden_states,
1321
+ attentions=encoder_outputs.attentions,
1322
+ )
1323
+
1324
+
1325
+ @add_start_docstrings(
1326
+ """GIT Model with a `language modeling` head on top for autoregressive language modeling.""", GIT_START_DOCSTRING
1327
+ )
1328
+ class GitForCausalLM(GitPreTrainedModel):
1329
+ def __init__(self, config):
1330
+ super().__init__(config)
1331
+
1332
+ self.git = GitModel(config)
1333
+ self.output = nn.Linear(config.hidden_size, config.vocab_size)
1334
+
1335
+ # Initialize weights and apply final processing
1336
+ self.post_init()
1337
+
1338
+ def get_output_embeddings(self):
1339
+ return self.output
1340
+
1341
+ def set_output_embeddings(self, new_embeddings):
1342
+ self.output = new_embeddings
1343
+
1344
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1345
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1346
+ def forward(
1347
+ self,
1348
+ input_ids: Optional[torch.Tensor] = None,
1349
+ attention_mask: Optional[torch.Tensor] = None,
1350
+ position_ids: Optional[torch.Tensor] = None,
1351
+ pixel_values: Optional[torch.Tensor] = None,
1352
+ head_mask: Optional[torch.Tensor] = None,
1353
+ inputs_embeds: Optional[torch.Tensor] = None,
1354
+ labels: Optional[torch.Tensor] = None,
1355
+ past_key_values: Optional[List[torch.Tensor]] = None,
1356
+ use_cache: Optional[bool] = None,
1357
+ output_attentions: Optional[bool] = None,
1358
+ output_hidden_states: Optional[bool] = None,
1359
+ return_dict: Optional[bool] = None,
1360
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
1361
+ r"""
1362
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1363
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1364
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1365
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1366
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1367
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1368
+
1369
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1370
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1371
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1372
+ use_cache (`bool`, *optional*):
1373
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1374
+ `past_key_values`).
1375
+
1376
+ Returns:
1377
+
1378
+ Examples:
1379
+
1380
+ Image captioning example:
1381
+
1382
+ ```python
1383
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1384
+ >>> import requests
1385
+ >>> from PIL import Image
1386
+
1387
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
1388
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
1389
+
1390
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1391
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1392
+
1393
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
1394
+
1395
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
1396
+ >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1397
+ >>> print(generated_caption)
1398
+ two cats sleeping on a pink blanket next to remotes.
1399
+ ```
1400
+
1401
+ Visual question answering (VQA) example:
1402
+
1403
+ ```python
1404
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1405
+ >>> from huggingface_hub import hf_hub_download
1406
+ >>> from PIL import Image
+ >>> import torch
1407
+
1408
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large-textvqa")
1409
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-textvqa")
1410
+
1411
+ >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
1412
+ >>> image = Image.open(file_path).convert("RGB")
1413
+
1414
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
1415
+
1416
+ >>> question = "what does the front of the bus say at the top?"
1417
+
1418
+ >>> input_ids = processor(text=question, add_special_tokens=False).input_ids
1419
+ >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
1420
+ >>> input_ids = torch.tensor(input_ids).unsqueeze(0)
1421
+
1422
+ >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
1423
+ >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
1424
+ ['what does the front of the bus say at the top? special']
1425
+ ```
1426
+
1427
+ Video captioning example:
1428
+
1429
+ ```python
1430
+ >>> import av
1431
+ >>> import numpy as np
1432
+ >>> from PIL import Image
1433
+ >>> from huggingface_hub import hf_hub_download
1434
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1435
+
1436
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large-vatex")
1437
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-vatex")
1438
+
1439
+ >>> # set seed for reproducibility
1440
+ >>> np.random.seed(45)
1441
+
1442
+
1443
+ >>> def read_video_pyav(container, indices):
1444
+ ... '''
1445
+ ... Decode the video with PyAV decoder.
1446
+ ... Args:
1447
+ ... container (`av.container.input.InputContainer`): PyAV container.
1448
+ ... indices (`List[int]`): List of frame indices to decode.
1449
+ ... Returns:
1450
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
1451
+ ... '''
1452
+ ... frames = []
1453
+ ... container.seek(0)
1454
+ ... start_index = indices[0]
1455
+ ... end_index = indices[-1]
1456
+ ... for i, frame in enumerate(container.decode(video=0)):
1457
+ ... if i > end_index:
1458
+ ... break
1459
+ ... if i >= start_index and i in indices:
1460
+ ... frames.append(frame)
1461
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
1462
+
1463
+
1464
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
1465
+ ... converted_len = int(clip_len * frame_sample_rate)
1466
+ ... end_idx = np.random.randint(converted_len, seg_len)
1467
+ ... start_idx = end_idx - converted_len
1468
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
1469
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
1470
+ ... return indices
1471
+
1472
+
1473
+ >>> # load video
1474
+ >>> file_path = hf_hub_download(
1475
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
1476
+ ... )
1477
+ >>> container = av.open(file_path)
1478
+
1479
+ >>> # sample frames
1480
+ >>> num_frames = model.config.num_image_with_embedding
1481
+ >>> indices = sample_frame_indices(
1482
+ ... clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
1483
+ ... )
1484
+ >>> frames = read_video_pyav(container, indices)
1485
+
1486
+ >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values
1487
+
1488
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
1489
+
1490
+ >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
1491
+ Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
1492
+ ```
1493
+ """
1494
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1495
+ if labels is not None:
1496
+ use_cache = False
1497
+
1498
+ outputs = self.git(
1499
+ input_ids,
1500
+ attention_mask=attention_mask,
1501
+ position_ids=position_ids,
1502
+ pixel_values=pixel_values,
1503
+ head_mask=head_mask,
1504
+ inputs_embeds=inputs_embeds,
1505
+ past_key_values=past_key_values,
1506
+ use_cache=use_cache,
1507
+ output_attentions=output_attentions,
1508
+ output_hidden_states=output_hidden_states,
1509
+ return_dict=return_dict,
1510
+ )
1511
+
1512
+ sequence_output = outputs[0]
1513
+ logits = self.output(sequence_output)
1514
+
1515
+ loss = None
1516
+ if labels is not None:
1517
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1518
+ num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
1519
+ shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
1520
+ labels = labels[:, 1:].contiguous()
1521
+ loss_fct = CrossEntropyLoss()
1522
+ loss = loss_fct(shifted_logits.view(-1, self.config.vocab_size), labels.view(-1))
1523
+
1524
+ if not return_dict:
1525
+ output = (logits,) + outputs[1:]
1526
+ return ((loss,) + output) if loss is not None else output
1527
+
1528
+ return CausalLMOutputWithPast(
1529
+ loss=loss,
1530
+ logits=logits,
1531
+ past_key_values=outputs.past_key_values,
1532
+ hidden_states=outputs.hidden_states,
1533
+ attentions=outputs.attentions,
1534
+ )
1535
+
1536
+ def prepare_inputs_for_generation(
1537
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1538
+ ):
1539
+ # cut decoder_input_ids if past_key_values is used
1540
+ if past_key_values is not None:
1541
+ input_ids = input_ids[:, -1:]
1542
+
1543
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1544
+ input_shape = input_ids.shape
1545
+ if attention_mask is None:
1546
+ attention_mask = input_ids.new_ones(input_shape)
1547
+
1548
+ return {
1549
+ "input_ids": input_ids,
1550
+ "attention_mask": attention_mask,
1551
+ "pixel_values": kwargs.get("pixel_values", None),
1552
+ "past_key_values": past_key_values,
1553
+ "use_cache": use_cache,
1554
+ }
1555
+
1556
+ def _reorder_cache(self, past_key_values, beam_idx):
1557
+ reordered_past = ()
1558
+ for layer_past in past_key_values:
1559
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
1560
+ return reordered_past
1561
+
1562
+
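One detail of the loss computation in `GitForCausalLM.forward` that is easy to miss: because the projected image tokens are prepended to the text sequence, the logits over the image positions are discarded before the usual next-token shift, so predictions and labels line up on text positions only. A tiny illustration with made-up sizes:

```python
import torch

batch, num_image_tokens, text_len, vocab = 1, 3, 4, 10
logits = torch.randn(batch, num_image_tokens + text_len, vocab)
labels = torch.randint(0, vocab, (batch, text_len))

# Same slicing as in the forward above: drop image positions, then shift by one.
shifted_logits = logits[:, num_image_tokens:-1, :]  # predicts text tokens 1..text_len-1
shifted_labels = labels[:, 1:]                      # targets for text tokens 1..text_len-1
assert shifted_logits.shape[1] == shifted_labels.shape[1] == text_len - 1
```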
1563
+
1564
+ ### MOD
1565
+
1566
+
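The classes below (`GitForCausalLMClipEmb`, wrapping a `GitModelClipEmb` defined elsewhere in this file) are the local modification this copy of the file exists for: as the names suggest, they appear to accept precomputed CLIP vision embeddings through the `pixel_values` argument so that the CLIP image encoder can be bypassed. A hedged usage sketch under that assumption (the 257x1024 embedding shape, the checkpoint name, and passing embeddings via `pixel_values` are assumptions for illustration, not documented API):

```python
# Hypothetical usage sketch: assumes GitForCausalLMClipEmb accepts precomputed
# CLIP vision hidden states via `pixel_values` instead of raw pixel arrays.
import torch
from transformers import AutoProcessor

from modeling_git import GitForCausalLMClipEmb  # this file

processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
model = GitForCausalLMClipEmb.from_pretrained("microsoft/git-large-coco")

clip_embeds = torch.randn(1, 257, 1024)  # e.g. predicted CLIP image token embeddings
generated_ids = model.generate(pixel_values=clip_embeds, max_length=20)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))
```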
1567
+ class GitForCausalLMClipEmb(GitPreTrainedModel):
1568
+ def __init__(self, config):
1569
+ super().__init__(config)
1570
+
1571
+ self.git = GitModelClipEmb(config)
1572
+ self.output = nn.Linear(config.hidden_size, config.vocab_size)
1573
+
1574
+ # Initialize weights and apply final processing
1575
+ self.post_init()
1576
+
1577
+ def get_output_embeddings(self):
1578
+ return self.output
1579
+
1580
+ def set_output_embeddings(self, new_embeddings):
1581
+ self.output = new_embeddings
1582
+
1583
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1584
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1585
+ def forward(
1586
+ self,
1587
+ input_ids: Optional[torch.Tensor] = None,
1588
+ attention_mask: Optional[torch.Tensor] = None,
1589
+ position_ids: Optional[torch.Tensor] = None,
1590
+ pixel_values: Optional[torch.Tensor] = None,
1591
+ head_mask: Optional[torch.Tensor] = None,
1592
+ inputs_embeds: Optional[torch.Tensor] = None,
1593
+ labels: Optional[torch.Tensor] = None,
1594
+ past_key_values: Optional[List[torch.Tensor]] = None,
1595
+ use_cache: Optional[bool] = None,
1596
+ output_attentions: Optional[bool] = None,
1597
+ output_hidden_states: Optional[bool] = None,
1598
+ return_dict: Optional[bool] = None,
1599
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
1600
+ r"""
1601
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1602
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1603
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1604
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1605
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1606
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1607
+
1608
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1609
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1610
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1611
+ use_cache (`bool`, *optional*):
1612
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1613
+ `past_key_values`).
1614
+
1615
+ Returns:
1616
+
1617
+ Examples:
1618
+
1619
+ Image captioning example:
1620
+
1621
+ ```python
1622
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1623
+ >>> import requests
1624
+ >>> from PIL import Image
1625
+
1626
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
1627
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
1628
+
1629
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1630
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1631
+
1632
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
1633
+
1634
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
1635
+ >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1636
+ >>> print(generated_caption)
1637
+ two cats sleeping on a pink blanket next to remotes.
1638
+ ```
1639
+
1640
+ Visual question answering (VQA) example:
1641
+
1642
+ ```python
1643
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1644
+ >>> from huggingface_hub import hf_hub_download
1645
+ >>> from PIL import Image
1646
+
1647
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large-textvqa")
1648
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-textvqa")
1649
+
1650
+ >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
1651
+ >>> image = Image.open(file_path).convert("RGB")
1652
+
1653
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
1654
+
1655
+ >>> question = "what does the front of the bus say at the top?"
1656
+
1657
+ >>> input_ids = processor(text=question, add_special_tokens=False).input_ids
1658
+ >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
1659
+ >>> input_ids = torch.tensor(input_ids).unsqueeze(0)
1660
+
1661
+ >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
1662
+ >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
1663
+ ['what does the front of the bus say at the top? special']
1664
+ ```
1665
+
1666
+ Video captioning example:
1667
+
1668
+ ```python
1669
+ >>> import av
1670
+ >>> import numpy as np
1671
+ >>> from PIL import Image
1672
+ >>> from huggingface_hub import hf_hub_download
1673
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1674
+
1675
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large-vatex")
1676
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-vatex")
1677
+
1678
+ >>> # set seed for reproducibility
1679
+ >>> np.random.seed(45)
1680
+
1681
+
1682
+ >>> def read_video_pyav(container, indices):
1683
+ ... '''
1684
+ ... Decode the video with PyAV decoder.
1685
+ ... Args:
1686
+ ... container (`av.container.input.InputContainer`): PyAV container.
1687
+ ... indices (`List[int]`): List of frame indices to decode.
1688
+ ... Returns:
1689
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
1690
+ ... '''
1691
+ ... frames = []
1692
+ ... container.seek(0)
1693
+ ... start_index = indices[0]
1694
+ ... end_index = indices[-1]
1695
+ ... for i, frame in enumerate(container.decode(video=0)):
1696
+ ... if i > end_index:
1697
+ ... break
1698
+ ... if i >= start_index and i in indices:
1699
+ ... frames.append(frame)
1700
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
1701
+
1702
+
1703
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
1704
+ ... converted_len = int(clip_len * frame_sample_rate)
1705
+ ... end_idx = np.random.randint(converted_len, seg_len)
1706
+ ... start_idx = end_idx - converted_len
1707
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
1708
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
1709
+ ... return indices
1710
+
1711
+
1712
+ >>> # load video
1713
+ >>> file_path = hf_hub_download(
1714
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
1715
+ ... )
1716
+ >>> container = av.open(file_path)
1717
+
1718
+ >>> # sample frames
1719
+ >>> num_frames = model.config.num_image_with_embedding
1720
+ >>> indices = sample_frame_indices(
1721
+ ... clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
1722
+ ... )
1723
+ >>> frames = read_video_pyav(container, indices)
1724
+
1725
+ >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values
1726
+
1727
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
1728
+
1729
+ >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
1730
+ Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
1731
+ ```
1732
+ """
1733
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1734
+ if labels is not None:
1735
+ use_cache = False
1736
+
1737
+ outputs = self.git(
1738
+ input_ids,
1739
+ attention_mask=attention_mask,
1740
+ position_ids=position_ids,
1741
+ pixel_values=pixel_values,
1742
+ head_mask=head_mask,
1743
+ inputs_embeds=inputs_embeds,
1744
+ past_key_values=past_key_values,
1745
+ use_cache=use_cache,
1746
+ output_attentions=output_attentions,
1747
+ output_hidden_states=output_hidden_states,
1748
+ return_dict=return_dict,
1749
+ )
1750
+
1751
+ sequence_output = outputs[0]
1752
+ logits = self.output(sequence_output)
1753
+
1754
+ loss = None
1755
+ if labels is not None:
1756
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1757
+ num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
1758
+ shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
1759
+ labels = labels[:, 1:].contiguous()
1760
+ loss_fct = CrossEntropyLoss()
1761
+ loss = loss_fct(shifted_logits.view(-1, self.config.vocab_size), labels.view(-1))
1762
+
1763
+ if not return_dict:
1764
+ output = (logits,) + outputs[1:]
1765
+ return ((loss,) + output) if loss is not None else output
1766
+
1767
+ return CausalLMOutputWithPast(
1768
+ loss=loss,
1769
+ logits=logits,
1770
+ past_key_values=outputs.past_key_values,
1771
+ hidden_states=outputs.hidden_states,
1772
+ attentions=outputs.attentions,
1773
+ )
1774
+
1775
+ def prepare_inputs_for_generation(
1776
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1777
+ ):
1778
+ # cut decoder_input_ids if past_key_values is used
1779
+ if past_key_values is not None:
1780
+ input_ids = input_ids[:, -1:]
1781
+
1782
+ # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
1783
+ input_shape = input_ids.shape
1784
+ if attention_mask is None:
1785
+ attention_mask = input_ids.new_ones(input_shape)
1786
+
1787
+ return {
1788
+ "input_ids": input_ids,
1789
+ "attention_mask": attention_mask,
1790
+ "pixel_values": kwargs.get("pixel_values", None),
1791
+ "past_key_values": past_key_values,
1792
+ "use_cache": use_cache,
1793
+ }
1794
+
1795
+ def _reorder_cache(self, past_key_values, beam_idx):
1796
+ reordered_past = ()
1797
+ for layer_past in past_key_values:
1798
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
1799
+ return reordered_past
1800
+
1801
+
1802
+ class GitModelClipEmb(GitPreTrainedModel):
1803
+ def __init__(self, config):
1804
+ super().__init__(config)
1805
+ self.config = config
1806
+
1807
+ self.embeddings = GitEmbeddings(config)
1808
+ self.image_encoder = GitVisionModel(config.vision_config)
1809
+ self.encoder = GitEncoder(config)
1810
+
1811
+ self.visual_projection = GitProjection(config)
1812
+
1813
+ if config.num_image_with_embedding is not None:
1814
+ self.img_temperal_embedding = nn.ParameterList(
1815
+ nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
1816
+ for _ in range(config.num_image_with_embedding)
1817
+ )
1818
+
1819
+ # Initialize weights and apply final processing
1820
+ self.post_init()
1821
+
1822
+ def get_input_embeddings(self):
1823
+ return self.embeddings.word_embeddings
1824
+
1825
+ def set_input_embeddings(self, value):
1826
+ self.embeddings.word_embeddings = value
1827
+
1828
+ def _prune_heads(self, heads_to_prune):
1829
+ """
1830
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1831
+ class PreTrainedModel
1832
+ """
1833
+ for layer, heads in heads_to_prune.items():
1834
+ self.encoder.layer[layer].attention.prune_heads(heads)
1835
+
1836
+ def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
1837
+ # Default mask is for forward direction. Flip for backward direction.
1838
+ mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
1839
+ mask = mask.masked_fill(mask == 1, float("-inf"))
1840
+ return mask
1841
+
1842
+ def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
1843
+ num_tgt = tgt.shape[1]
1844
+ num_memory = memory.shape[1]
1845
+ device = tgt.device
1846
+ dtype = tgt.dtype
1847
+ top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
1848
+ top_right = torch.full(
1849
+ (num_memory, num_tgt + past_key_values_length),
1850
+ float("-inf"),
1851
+ device=tgt.device,
1852
+ dtype=dtype,
1853
+ )
1854
+ bottom_left = torch.zeros(
1855
+ (num_tgt, num_memory),
1856
+ dtype=dtype,
1857
+ device=tgt_mask.device,
1858
+ )
1859
+
1860
+ if past_key_values_length > 0:
1861
+ tgt_mask = torch.zeros(
1862
+ (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length),
1863
+ dtype=dtype,
1864
+ device=tgt_mask.device,
1865
+ )
1866
+
1867
+ left = torch.cat((top_left, bottom_left), dim=0)
1868
+ right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
1869
+
1870
+ full_attention_mask = torch.cat((left, right), dim=1)[None, :]
1871
+
1872
+ if memory_key_padding_mask is None:
1873
+ memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
1874
+ # False means the position is valid, i.e. it is not padding
1875
+ if memory_key_padding_mask.dtype != torch.bool:
1876
+ raise ValueError("Memory key padding mask must be a boolean tensor.")
1877
+ zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
1878
+ zero_negative_infinity[memory_key_padding_mask] = float("-inf")
1879
+ full_attention_mask = full_attention_mask.expand(
1880
+ (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)
1881
+ )
1882
+ full_attention_mask = full_attention_mask.clone()
1883
+ origin_left = full_attention_mask[:, :, :num_memory]
1884
+ update = zero_negative_infinity[:, None, :]
1885
+ full_attention_mask[:, :, :num_memory] = origin_left + update
1886
+
1887
+ # add axis for multi-head
1888
+ full_attention_mask = full_attention_mask[:, None, :, :]
1889
+
1890
+ return full_attention_mask
1891
+
1892
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1893
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
1894
+ def forward(
1895
+ self,
1896
+ input_ids: Optional[torch.Tensor] = None,
1897
+ attention_mask: Optional[torch.Tensor] = None,
1898
+ position_ids: Optional[torch.Tensor] = None,
1899
+ pixel_values: Optional[torch.Tensor] = None,
1900
+ head_mask: Optional[torch.Tensor] = None,
1901
+ inputs_embeds: Optional[torch.Tensor] = None,
1902
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1903
+ use_cache: Optional[bool] = None,
1904
+ output_attentions: Optional[bool] = None,
1905
+ output_hidden_states: Optional[bool] = None,
1906
+ return_dict: Optional[bool] = None,
1907
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
1908
+ r"""
1909
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1910
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1911
+
1912
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1913
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1914
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1915
+ use_cache (`bool`, *optional*):
1916
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1917
+ `past_key_values`).
1918
+
1919
+ Returns:
1920
+
1921
+ Examples:
1922
+
1923
+ ```python
1924
+ >>> from transformers import AutoProcessor, AutoModel
1925
+ >>> import requests
1926
+ >>> from PIL import Image
1927
+
1928
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-large")
1929
+ >>> model = AutoModel.from_pretrained("microsoft/git-large")
1930
+
1931
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1932
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1933
+
1934
+ >>> text = "this is an image of two cats"
1935
+
1936
+ >>> inputs = processor(text, images=image, return_tensors="pt")
1937
+
1938
+ >>> outputs = model(**inputs)
1939
+ >>> last_hidden_state = outputs.last_hidden_state
1940
+ ```"""
1941
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1942
+ output_hidden_states = (
1943
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1944
+ )
1945
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1946
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1947
+
1948
+ if input_ids is not None and inputs_embeds is not None:
1949
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1950
+ elif input_ids is not None:
1951
+ input_shape = input_ids.size()
1952
+ elif inputs_embeds is not None:
1953
+ input_shape = inputs_embeds.size()[:-1]
1954
+ else:
1955
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1956
+
1957
+ seq_length = input_shape[1]
1958
+
1959
+ # past_key_values_length
1960
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1961
+
1962
+ # Prepare head mask if needed
1963
+ # 1.0 in head_mask indicate we keep the head
1964
+ # attention_probs has shape bsz x n_heads x N x N
1965
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1966
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1967
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1968
+
1969
+ projected_visual_features = None
1970
+ if pixel_values is not None:
1971
+
1972
+ # print("turning pixel_values in visual_features")
1973
+ visual_features = pixel_values
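+ # NOTE: in this ClipEmb variant, `pixel_values` are expected to already be CLIP image-token embeddings (e.g. shape [batch, 257, hidden]), so the internal image encoder call below is intentionally bypassed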
1974
+ # if pixel_values.ndim == 4:
1975
+ # # here we assume pixel_values is of shape (batch_size, num_channels, height, width)
1976
+ # visual_features = self.image_encoder(pixel_values).last_hidden_state
1977
+
1978
+
1979
+ # else:
1980
+ # raise ValueError("pixel_values must be of rank 4 or 5")
1981
+
1982
+ projected_visual_features = self.visual_projection(visual_features)
1983
+
1984
+ embedding_output = self.embeddings(
1985
+ input_ids=input_ids,
1986
+ position_ids=position_ids,
1987
+ inputs_embeds=inputs_embeds,
1988
+ past_key_values_length=past_key_values_length,
1989
+ )
1990
+
1991
+ if projected_visual_features is None:
1992
+ projected_visual_features = torch.zeros(
1993
+ (embedding_output.shape[0], 0, embedding_output.shape[2]),
1994
+ dtype=embedding_output.dtype,
1995
+ device=embedding_output.device,
1996
+ )
1997
+
1998
+ # Repeat visual features to match embedding batch size.
1999
+ projected_visual_features = projected_visual_features.repeat(
2000
+ embedding_output.size(0) // projected_visual_features.size(0), 1, 1
2001
+ )
2002
+
2003
+ # concatenate patch token and text token embeddings
2004
+ hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)
2005
+
2006
+ # By default, an additive causal mask is created
2007
+ # for masking the future (one direction).
2008
+ tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)
2009
+
2010
+ # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len)
2011
+ combined_attention_mask = self.create_attention_mask(
2012
+ tgt=embedding_output,
2013
+ memory=projected_visual_features,
2014
+ tgt_mask=tgt_mask,
2015
+ past_key_values_length=past_key_values_length,
2016
+ )
2017
+
2018
+ if attention_mask is not None:
2019
+ # if the user provides an attention mask, we add it to the default one
2020
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
2021
+ expanded_attn_mask = _expand_mask(attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]).to(
2022
+ embedding_output.device
2023
+ )
2024
+ if past_key_values_length > 0:
2025
+ expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
2026
+ else:
2027
+ combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask
2028
+
2029
+ encoder_outputs = self.encoder(
2030
+ hidden_states,
2031
+ attention_mask=combined_attention_mask,
2032
+ head_mask=head_mask,
2033
+ past_key_values=past_key_values,
2034
+ use_cache=use_cache,
2035
+ output_attentions=output_attentions,
2036
+ output_hidden_states=output_hidden_states,
2037
+ return_dict=return_dict,
2038
+ pixel_values_present=pixel_values is not None,
2039
+ )
2040
+ sequence_output = encoder_outputs[0]
2041
+
2042
+ if not return_dict:
2043
+ return (sequence_output,) + encoder_outputs[1:]
2044
+
2045
+ return BaseModelOutputWithPast(
2046
+ last_hidden_state=sequence_output,
2047
+ past_key_values=encoder_outputs.past_key_values,
2048
+ hidden_states=encoder_outputs.hidden_states,
2049
+ attentions=encoder_outputs.attentions,
2050
+ )
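The two classes added after the `### MOD` marker (`GitForCausalLMClipEmb` wrapping `GitModelClipEmb`) mirror the stock GIT classes but treat `pixel_values` as precomputed CLIP image-token embeddings, skipping the internal vision encoder. A minimal, hypothetical captioning sketch, assuming a GIT-large checkpoint (257 image tokens, 1024-dim vision hidden states) and that this file is importable as `modeling_git`:

import torch
from transformers import AutoProcessor
from modeling_git import GitForCausalLMClipEmb

processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
model = GitForCausalLMClipEmb.from_pretrained("microsoft/git-large-coco").eval()

# stand-in for real CLIP ViT-L/14 hidden states; in this repo they would typically
# come from brain-predicted CLIP embeddings rather than torch.randn
clip_tokens = torch.randn(1, 257, 1024)

with torch.no_grad():
    generated_ids = model.generate(pixel_values=clip_tokens, max_length=20)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])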
models.py ADDED
@@ -0,0 +1,756 @@
1
+ import os
2
+ import numpy as np
3
+ from torchvision import transforms
4
+ import torch
5
+ import torch.nn as nn
6
+ import PIL
7
+ import clip
8
+ import open_clip
9
+ from functools import partial
10
+ import random
11
+ import json
12
+ from tqdm import tqdm
13
+ import utils
14
+
15
+ # class BrainMLP(nn.Module):
16
+ # def __init__(self, out_dim=257*768, in_dim=15724, clip_size=768, h=4096):
17
+ # super().__init__()
18
+ # self.lin0 = nn.Sequential(
19
+ # nn.Linear(in_dim, h, bias=False),
20
+ # nn.LayerNorm(h),
21
+ # nn.GELU(inplace=True),
22
+ # nn.Dropout(0.5))
23
+ # self.mlp = nn.ModuleList([
24
+ # nn.Sequential(
25
+ # nn.Linear(h, h),
26
+ # nn.LayerNorm(h),
27
+ # nn.GELU(inplace=True),
28
+ # nn.Dropout(0.15)
29
+ # ) for _ in range(4)])
30
+ # self.lin1 = nn.Linear(h, out_dim, bias=True)
31
+ # self.proj = nn.Sequential(
32
+ # nn.LayerNorm(clip_size),
33
+ # nn.GELU(),
34
+ # nn.Linear(clip_size, 2048),
35
+ # nn.LayerNorm(2048),
36
+ # nn.GELU(),
37
+ # nn.Linear(2048, 2048),
38
+ # nn.LayerNorm(2048),
39
+ # nn.GELU(),
40
+ # nn.Linear(2048, clip_size))
41
+ # def forward(self, x):
42
+ # x = self.lin0(x)
43
+ # residual = x
44
+ # for res_block in range(self.n_blocks):
45
+ # x = self.mlp[res_block](x)
46
+ # x += residual
47
+ # residual = x
48
+ # diffusion_prior_input = self.lin1(x.reshape(len(x), -1))
49
+ # disjointed_clip_fmri = self.proj(diffusion_prior_input.reshape(
50
+ # len(x),-1, self.clip_size))
51
+ # return diffusion_prior_input, disjointed_clip_fmri
52
+
53
+
54
+
55
+ class Clipper(torch.nn.Module):
56
+ def __init__(self, clip_variant, clamp_embs=False, norm_embs=False,
57
+ hidden_state=False, device=torch.device('cpu')):
58
+ super().__init__()
59
+ assert clip_variant in ("RN50", "ViT-L/14", "ViT-B/32", "RN50x64"), \
60
+ "clip_variant must be one of RN50, ViT-L/14, ViT-B/32, RN50x64"
61
+ print(clip_variant, device)
62
+
63
+ if clip_variant=="ViT-L/14" and hidden_state:
64
+ # from transformers import CLIPVisionModelWithProjection
65
+ # image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14",cache_dir="/fsx/proj-medarc/fmri/cache")
66
+ from transformers import CLIPVisionModelWithProjection
67
+ sd_cache_dir = '/fsx/proj-fmri/shared/cache/models--shi-labs--versatile-diffusion/snapshots/2926f8e11ea526b562cd592b099fcf9c2985d0b7'
68
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(sd_cache_dir, subfolder='image_encoder').eval()
69
+ image_encoder = image_encoder.to(device)
70
+ for param in image_encoder.parameters():
71
+ param.requires_grad = False # dont need to calculate gradients
72
+ self.image_encoder = image_encoder
73
+ elif hidden_state:
74
+ raise Exception("hidden_state embeddings only works with ViT-L/14 right now")
75
+
76
+ clip_model, preprocess = clip.load(clip_variant, device=device)
77
+ clip_model.eval() # dont want to train model
78
+ for param in clip_model.parameters():
79
+ param.requires_grad = False # dont need to calculate gradients
80
+
81
+ self.clip = clip_model
82
+ self.clip_variant = clip_variant
83
+ if clip_variant == "RN50x64":
84
+ self.clip_size = (448,448)
85
+ else:
86
+ self.clip_size = (224,224)
87
+
88
+ preproc = transforms.Compose([
89
+ transforms.Resize(size=self.clip_size[0], interpolation=transforms.InterpolationMode.BICUBIC),
90
+ transforms.CenterCrop(size=self.clip_size),
91
+ transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
92
+ ])
93
+ self.preprocess = preproc
94
+ self.hidden_state = hidden_state
95
+ self.mean = np.array([0.48145466, 0.4578275, 0.40821073])
96
+ self.std = np.array([0.26862954, 0.26130258, 0.27577711])
97
+ self.normalize = transforms.Normalize(self.mean, self.std)
98
+ self.denormalize = transforms.Normalize((-self.mean / self.std).tolist(), (1.0 / self.std).tolist())
99
+ self.clamp_embs = clamp_embs
100
+ self.norm_embs = norm_embs
101
+ self.device= device
102
+
103
+ def versatile_normalize_embeddings(encoder_output):
104
+ embeds = encoder_output.last_hidden_state
105
+ embeds = image_encoder.vision_model.post_layernorm(embeds)
106
+ embeds = image_encoder.visual_projection(embeds)
107
+ return embeds
108
+ self.versatile_normalize_embeddings = versatile_normalize_embeddings
109
+
110
+ def resize_image(self, image):
111
+ # note: antialias should be False if planning to use Pinkney's Image Variation SD model
112
+ return transforms.Resize(self.clip_size)(image.to(self.device))
113
+
114
+ def embed_image(self, image):
115
+ """Expects images in -1 to 1 range"""
116
+ if self.hidden_state:
117
+ # clip_emb = self.preprocess((image/1.5+.25).to(self.device)) # for some reason the /1.5+.25 prevents oversaturation
118
+ clip_emb = self.preprocess((image).to(self.device))
119
+ clip_emb = self.image_encoder(clip_emb)
120
+ clip_emb = self.versatile_normalize_embeddings(clip_emb)
121
+ else:
122
+ clip_emb = self.preprocess(image.to(self.device))
123
+ clip_emb = self.clip.encode_image(clip_emb)
124
+ # input is now in CLIP space, but mind-reader preprint further processes embeddings:
125
+ if self.clamp_embs:
126
+ clip_emb = torch.clamp(clip_emb, -1.5, 1.5)
127
+ if self.norm_embs:
128
+ if self.hidden_state:
129
+ # normalize all tokens by cls token's norm
130
+ clip_emb = clip_emb / torch.norm(clip_emb[:, 0], dim=-1).reshape(-1, 1, 1)
131
+ else:
132
+ clip_emb = nn.functional.normalize(clip_emb, dim=-1)
133
+ return clip_emb
134
+
135
+ def embed_text(self, text_samples):
136
+ clip_text = clip.tokenize(text_samples).to(self.device)
137
+ clip_text = self.clip.encode_text(clip_text)
138
+ if self.clamp_embs:
139
+ clip_text = torch.clamp(clip_text, -1.5, 1.5)
140
+ if self.norm_embs:
141
+ clip_text = nn.functional.normalize(clip_text, dim=-1)
142
+ return clip_text
143
+
144
+ def embed_curated_annotations(self, annots):
145
+ for i,b in enumerate(annots):
146
+ t = ''
147
+ while t == '':
148
+ rand = torch.randint(5,(1,1))[0][0]
149
+ t = b[0,rand]
150
+ if i==0:
151
+ txt = np.array(t)
152
+ else:
153
+ txt = np.vstack((txt,t))
154
+ txt = txt.flatten()
155
+ return self.embed_text(txt)
156
+
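A hypothetical usage sketch for the `Clipper` wrapper above: with `hidden_state=True` it returns the full 257-token sequence projected to 768 dims through the Versatile Diffusion image encoder (the hard-coded cache path in `__init__` must exist for that branch); shapes and the dummy inputs below are assumptions.

import torch
from models import Clipper

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
clip_extractor = Clipper("ViT-L/14", hidden_state=True, norm_embs=True, device=device)

images = torch.rand(4, 3, 224, 224, device=device)  # image batch (see embed_image docstring for the expected range)
image_embs = clip_extractor.embed_image(images)      # expected shape: (4, 257, 768)
text_embs = clip_extractor.embed_text(["a photo of a dog"])
print(image_embs.shape, text_embs.shape)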
157
+ # for prior
158
+ from dalle2_pytorch import DiffusionPrior
159
+ from dalle2_pytorch.dalle2_pytorch import l2norm, default, exists
160
+ from dalle2_pytorch.train_configs import DiffusionPriorNetworkConfig
161
+ # vd prior
162
+ from dalle2_pytorch.dalle2_pytorch import RotaryEmbedding, CausalTransformer, SinusoidalPosEmb, MLP, Rearrange, repeat, rearrange, prob_mask_like, LayerNorm, RelPosBias, Attention, FeedForward
163
+
164
+
165
+ class BrainDiffusionPrior(DiffusionPrior):
166
+ """
167
+ Differences from original:
168
+ - Allow for passing of generators to torch random functions
169
+ - Option to include the voxel2clip model and pass voxels into forward method
170
+ - Return predictions when computing loss
171
+ - Load pretrained model from @nousr trained on LAION aesthetics
172
+ """
173
+ def __init__(self, *args, **kwargs):
174
+ voxel2clip = kwargs.pop('voxel2clip', None)
175
+ super().__init__(*args, **kwargs)
176
+ self.voxel2clip = voxel2clip
177
+
178
+ @torch.no_grad()
179
+ def p_sample(self, x, t, text_cond = None, self_cond = None, clip_denoised = True, cond_scale = 1.,
180
+ generator=None):
181
+ b, *_, device = *x.shape, x.device
182
+ model_mean, _, model_log_variance, x_start = self.p_mean_variance(x = x, t = t, text_cond = text_cond, self_cond = self_cond, clip_denoised = clip_denoised, cond_scale = cond_scale)
183
+ if generator is None:
184
+ noise = torch.randn_like(x)
185
+ else:
186
+ noise = torch.randn_like(x)
187
+ # noise = torch.randn(x.size(), device=x.device, dtype=x.dtype, generator=generator)
188
+ # no noise when t == 0
189
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
190
+ pred = model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
191
+ return pred, x_start
192
+
193
+ @torch.no_grad()
194
+ def p_sample_loop(self, *args, timesteps = None, **kwargs):
195
+ timesteps = default(timesteps, self.noise_scheduler.num_timesteps)
196
+ assert timesteps <= self.noise_scheduler.num_timesteps
197
+ is_ddim = timesteps < self.noise_scheduler.num_timesteps
198
+
199
+ if not is_ddim:
200
+ normalized_image_embed = self.p_sample_loop_ddpm(*args, **kwargs)
201
+ else:
202
+ normalized_image_embed = self.p_sample_loop_ddim(*args, **kwargs, timesteps = timesteps)
203
+
204
+ # print("PS removed all image_embed_scale instances!")
205
+ image_embed = normalized_image_embed #/ self.image_embed_scale
206
+ return image_embed
207
+
208
+ @torch.no_grad()
209
+ def p_sample_loop_ddpm(self, shape, text_cond, cond_scale = 1., generator=None):
210
+ batch, device = shape[0], self.device
211
+
212
+ if generator is None:
213
+ image_embed = torch.randn(shape, device = device)
214
+ else:
215
+ image_embed = torch.randn(shape, device = device, generator=generator)
216
+ x_start = None # for self-conditioning
217
+
218
+ if self.init_image_embed_l2norm:
219
+ image_embed = l2norm(image_embed) * self.image_embed_scale
220
+
221
+ for i in tqdm(reversed(range(0, self.noise_scheduler.num_timesteps)), desc='sampling loop time step', total=self.noise_scheduler.num_timesteps, disable=True):
222
+ times = torch.full((batch,), i, device = device, dtype = torch.long)
223
+
224
+ self_cond = x_start if self.net.self_cond else None
225
+ image_embed, x_start = self.p_sample(image_embed, times, text_cond = text_cond, self_cond = self_cond, cond_scale = cond_scale,
226
+ generator=generator)
227
+
228
+ if self.sampling_final_clamp_l2norm and self.predict_x_start:
229
+ image_embed = self.l2norm_clamp_embed(image_embed)
230
+
231
+ return image_embed
232
+
233
+ def p_losses(self, image_embed, times, text_cond, noise = None):
234
+ noise = default(noise, lambda: torch.randn_like(image_embed))
235
+
236
+ image_embed_noisy = self.noise_scheduler.q_sample(x_start = image_embed, t = times, noise = noise)
237
+
238
+ self_cond = None
239
+ if self.net.self_cond and random.random() < 0.5:
240
+ with torch.no_grad():
241
+ self_cond = self.net(image_embed_noisy, times, **text_cond).detach()
242
+
243
+ pred = self.net(
244
+ image_embed_noisy,
245
+ times,
246
+ self_cond = self_cond,
247
+ text_cond_drop_prob = self.text_cond_drop_prob,
248
+ image_cond_drop_prob = self.image_cond_drop_prob,
249
+ **text_cond
250
+ )
251
+
252
+ if self.predict_x_start and self.training_clamp_l2norm:
253
+ pred = self.l2norm_clamp_embed(pred)
254
+
255
+ if self.predict_v:
256
+ target = self.noise_scheduler.calculate_v(image_embed, times, noise)
257
+ elif self.predict_x_start:
258
+ target = image_embed
259
+ else:
260
+ target = noise
261
+
262
+ loss = nn.functional.mse_loss(pred, target) # mse
263
+ # print("1", loss)
264
+ # loss += (1 - nn.functional.cosine_similarity(pred, target).mean())
265
+ # print("2", (1 - nn.functional.cosine_similarity(pred, target).mean()))
266
+ return loss, pred
267
+
268
+ def forward(
269
+ self,
270
+ text = None,
271
+ image = None,
272
+ voxel = None,
273
+ text_embed = None, # allow for training on preprocessed CLIP text and image embeddings
274
+ image_embed = None,
275
+ text_encodings = None, # as well as CLIP text encodings
276
+ *args,
277
+ **kwargs
278
+ ):
279
+ assert exists(text) ^ exists(text_embed) ^ exists(voxel), 'either text, text embedding, or voxel must be supplied'
280
+ assert exists(image) ^ exists(image_embed), 'either image or image embedding must be supplied'
281
+ assert not (self.condition_on_text_encodings and (not exists(text_encodings) and not exists(text))), 'text encodings must be present if you specified you wish to condition on it on initialization'
282
+
283
+ if exists(voxel):
284
+ assert exists(self.voxel2clip), 'voxel2clip must be trained if you wish to pass in voxels'
285
+ assert not exists(text_embed), 'cannot pass in both text and voxels'
286
+ if self.voxel2clip.use_projector:
287
+ clip_voxels_mse, clip_voxels = self.voxel2clip(voxel)
288
+ text_embed = clip_voxels_mse
289
+ else:
290
+ clip_voxels = self.voxel2clip(voxel)
291
+ text_embed = clip_voxels_mse = clip_voxels
292
+ # text_embed = self.voxel2clip(voxel)
293
+
294
+ if exists(image):
295
+ image_embed, _ = self.clip.embed_image(image)
296
+
297
+ # calculate text conditionings, based on what is passed in
298
+
299
+ if exists(text):
300
+ text_embed, text_encodings = self.clip.embed_text(text)
301
+
302
+ text_cond = dict(text_embed = text_embed)
303
+
304
+ if self.condition_on_text_encodings:
305
+ assert exists(text_encodings), 'text encodings must be present for diffusion prior if specified'
306
+ text_cond = {**text_cond, 'text_encodings': text_encodings}
307
+
308
+ # timestep conditioning from ddpm
309
+
310
+ batch, device = image_embed.shape[0], image_embed.device
311
+ times = self.noise_scheduler.sample_random_times(batch)
312
+
313
+ # PS: I don't think we need this; also, if uncommented, it would modify image_embed in place (a global side effect)
314
+ # scale image embed (Katherine)
315
+ # image_embed *= self.image_embed_scale
316
+
317
+ # calculate forward loss
318
+
319
+ loss, pred = self.p_losses(image_embed, times, text_cond = text_cond, *args, **kwargs)
320
+
321
+ # undo the scaling so we can directly use it for real mse loss and reconstruction
322
+ return loss, pred
323
+
324
+
325
+ class VersatileDiffusionPriorNetwork(nn.Module):
326
+ def __init__(
327
+ self,
328
+ dim,
329
+ num_timesteps = None,
330
+ num_time_embeds = 1,
331
+ # num_image_embeds = 1,
332
+ # num_brain_embeds = 1,
333
+ num_tokens = 257,
334
+ causal = True,
335
+ learned_query_mode = 'none',
336
+ **kwargs
337
+ ):
338
+ super().__init__()
339
+ self.dim = dim
340
+ self.num_time_embeds = num_time_embeds
341
+ self.continuous_embedded_time = not exists(num_timesteps)
342
+ self.learned_query_mode = learned_query_mode
343
+
344
+ self.to_time_embeds = nn.Sequential(
345
+ nn.Embedding(num_timesteps, dim * num_time_embeds) if exists(num_timesteps) else nn.Sequential(SinusoidalPosEmb(dim), MLP(dim, dim * num_time_embeds)), # also offer a continuous version of timestep embeddings, with a 2 layer MLP
346
+ Rearrange('b (n d) -> b n d', n = num_time_embeds)
347
+ )
348
+
349
+ if self.learned_query_mode == 'token':
350
+ self.learned_query = nn.Parameter(torch.randn(num_tokens, dim))
351
+ if self.learned_query_mode == 'pos_emb':
352
+ scale = dim ** -0.5
353
+ self.learned_query = nn.Parameter(torch.randn(num_tokens, dim) * scale)
354
+ if self.learned_query_mode == 'all_pos_emb':
355
+ scale = dim ** -0.5
356
+ self.learned_query = nn.Parameter(torch.randn(num_tokens*2+1, dim) * scale)
357
+ self.causal_transformer = FlaggedCausalTransformer(dim = dim, causal=causal, **kwargs)
358
+
359
+ self.null_brain_embeds = nn.Parameter(torch.randn(num_tokens, dim))
360
+ self.null_image_embed = nn.Parameter(torch.randn(num_tokens, dim))
361
+
362
+ self.num_tokens = num_tokens
363
+ self.self_cond = False
364
+
365
+ def forward_with_cond_scale(
366
+ self,
367
+ *args,
368
+ cond_scale = 1.,
369
+ **kwargs
370
+ ):
371
+ logits = self.forward(*args, **kwargs)
372
+
373
+ if cond_scale == 1:
374
+ return logits
375
+
376
+ null_logits = self.forward(*args, brain_cond_drop_prob = 1., image_cond_drop_prob = 1, **kwargs)
377
+ return null_logits + (logits - null_logits) * cond_scale
378
+
379
+ def forward(
380
+ self,
381
+ image_embed,
382
+ diffusion_timesteps,
383
+ *,
384
+ self_cond=None,
385
+ brain_embed=None,
386
+ text_embed=None,
387
+ brain_cond_drop_prob = 0.,
388
+ text_cond_drop_prob = None,
389
+ image_cond_drop_prob = 0.
390
+ ):
391
+ if text_embed is not None:
392
+ brain_embed = text_embed
393
+ if text_cond_drop_prob is not None:
394
+ brain_cond_drop_prob = text_cond_drop_prob
395
+
396
+ # image_embed = image_embed.view(len(image_embed),-1,16*16)
397
+ # text_embed = text_embed.view(len(text_embed),-1,768)
398
+ # brain_embed = brain_embed.view(len(brain_embed),-1,16*16)
399
+ # print(*image_embed.shape)
400
+ # print(*image_embed.shape, image_embed.device, image_embed.dtype)
401
+
402
+ batch, _, dim, device, dtype = *image_embed.shape, image_embed.device, image_embed.dtype
403
+ # num_time_embeds, num_image_embeds, num_brain_embeds = self.num_time_embeds, self.num_image_embeds, self.num_brain_embeds
404
+
405
+ # classifier free guidance masks
406
+ brain_keep_mask = prob_mask_like((batch,), 1 - brain_cond_drop_prob, device = device)
407
+ brain_keep_mask = rearrange(brain_keep_mask, 'b -> b 1 1')
408
+
409
+ image_keep_mask = prob_mask_like((batch,), 1 - image_cond_drop_prob, device = device)
410
+ image_keep_mask = rearrange(image_keep_mask, 'b -> b 1 1')
411
+
412
+ # mask out brain embeddings with null brain embeddings
413
+
414
+ # import pdb; pdb.set_trace()
415
+ null_brain_embeds = self.null_brain_embeds.to(brain_embed.dtype)
416
+ brain_embed = torch.where(
417
+ brain_keep_mask,
418
+ brain_embed,
419
+ null_brain_embeds[None]
420
+ )
421
+
422
+ # mask out image embeddings with null image embeddings
423
+ null_image_embed = self.null_image_embed.to(image_embed.dtype)
424
+ image_embed = torch.where(
425
+ image_keep_mask,
426
+ image_embed,
427
+ null_image_embed[None]
428
+ )
429
+
430
+ # whether brain embedding is used for conditioning depends on whether brain encodings are available for attention (for classifier free guidance, even though it seems from the paper it was not used in the prior ddpm, as the objective is different)
431
+ # but let's just do it right
432
+ if self.continuous_embedded_time:
433
+ # if continuous cast to flat, else keep int for indexing embeddings
434
+ diffusion_timesteps = diffusion_timesteps.type(dtype)
435
+ time_embed = self.to_time_embeds(diffusion_timesteps)
436
+
437
+ if self.learned_query_mode == 'token':
438
+ learned_queries = repeat(self.learned_query, 'n d -> b n d', b = batch)
439
+ elif self.learned_query_mode == 'pos_emb':
440
+ pos_embs = repeat(self.learned_query, 'n d -> b n d', b = batch)
441
+ image_embed = image_embed + pos_embs
442
+ learned_queries = torch.empty((batch, 0, dim), device=brain_embed.device)
443
+ elif self.learned_query_mode == 'all_pos_emb':
444
+ pos_embs = repeat(self.learned_query, 'n d -> b n d', b = batch)
445
+ learned_queries = torch.empty((batch, 0, dim), device=brain_embed.device)
446
+ else:
447
+ learned_queries = torch.empty((batch, 0, dim), device=brain_embed.device)
448
+
449
+ tokens = torch.cat((
450
+ brain_embed, # 257
451
+ time_embed, # 1
452
+ image_embed, # 257
453
+ learned_queries # 257
454
+ ), dim = -2)
455
+ if self.learned_query_mode == 'all_pos_emb':
456
+ tokens = tokens + pos_embs
457
+
458
+ # attend
459
+ tokens = self.causal_transformer(tokens)
460
+
461
+ # get learned query, which should predict the image embedding (per DDPM timestep)
462
+ pred_image_embed = tokens[..., -self.num_tokens:, :]
463
+
464
+ return pred_image_embed
465
+
466
+ class FlaggedCausalTransformer(nn.Module):
467
+ def __init__(
468
+ self,
469
+ *,
470
+ dim,
471
+ depth,
472
+ dim_head = 64,
473
+ heads = 8,
474
+ ff_mult = 4,
475
+ norm_in = False,
476
+ norm_out = True,
477
+ attn_dropout = 0.,
478
+ ff_dropout = 0.,
479
+ final_proj = True,
480
+ normformer = False,
481
+ rotary_emb = True,
482
+ causal=True
483
+ ):
484
+ super().__init__()
485
+ self.init_norm = LayerNorm(dim) if norm_in else nn.Identity() # from latest BLOOM model and Yandex's YaLM
486
+
487
+ self.rel_pos_bias = RelPosBias(heads = heads)
488
+
489
+ rotary_emb = RotaryEmbedding(dim = min(32, dim_head)) if rotary_emb else None
490
+
491
+ self.layers = nn.ModuleList([])
492
+ for _ in range(depth):
493
+ self.layers.append(nn.ModuleList([
494
+ Attention(dim = dim, causal = causal, dim_head = dim_head, heads = heads, dropout = attn_dropout, rotary_emb = rotary_emb),
495
+ FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, post_activation_norm = normformer)
496
+ ]))
497
+
498
+ self.norm = LayerNorm(dim, stable = True) if norm_out else nn.Identity() # unclear in paper whether they projected after the classic layer norm for the final denoised image embedding, or just had the transformer output it directly: plan on offering both options
499
+ self.project_out = nn.Linear(dim, dim, bias = False) if final_proj else nn.Identity()
500
+
501
+ def forward(self, x):
502
+ n, device = x.shape[1], x.device
503
+
504
+ x = self.init_norm(x)
505
+
506
+ attn_bias = self.rel_pos_bias(n, n + 1, device = device)
507
+
508
+ for attn, ff in self.layers:
509
+ x = attn(x, attn_bias = attn_bias) + x
510
+ x = ff(x) + x
511
+
512
+ out = self.norm(x)
513
+ return self.project_out(out)
514
+
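A hypothetical sketch of how `VersatileDiffusionPriorNetwork` and `BrainDiffusionPrior` (built on the `FlaggedCausalTransformer` above) can be wired together; the hyperparameters and random tensors here are illustrative assumptions, not the values used elsewhere in this repo.

import torch
from models import VersatileDiffusionPriorNetwork, BrainDiffusionPrior

prior_network = VersatileDiffusionPriorNetwork(
    dim=768, depth=6, dim_head=64, heads=12,
    causal=False, num_tokens=257, learned_query_mode="pos_emb",
)
diffusion_prior = BrainDiffusionPrior(
    net=prior_network,
    image_embed_dim=768,
    condition_on_text_encodings=False,
    timesteps=100,
    cond_drop_prob=0.2,
)

clip_target = torch.randn(2, 257, 768)    # ground-truth CLIP image tokens
backbone_pred = torch.randn(2, 257, 768)  # conditioning tokens (e.g. backbone predictions)
loss, pred = diffusion_prior(text_embed=backbone_pred, image_embed=clip_target)
print(loss.item(), pred.shape)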
515
+ #Subclass for GNET
516
+ class TrunkBlock(nn.Module):
517
+ def __init__(self, feat_in, feat_out):
518
+ super(TrunkBlock, self).__init__()
519
+ self.conv1 = nn.Conv2d(feat_in, int(feat_out*1.), kernel_size=3, stride=1, padding=1, dilation=1)
520
+ self.drop1 = nn.Dropout2d(p=0.5, inplace=False)
521
+ self.bn1 = nn.BatchNorm2d(feat_in, eps=1e-05, momentum=0.25, affine=True, track_running_stats=True)
522
+
523
+ torch.nn.init.xavier_normal_(self.conv1.weight, gain=torch.nn.init.calculate_gain('relu'))
524
+ torch.nn.init.constant_(self.conv1.bias, 0.0) # current
525
+
526
+ def forward(self, x):
527
+ return torch.nn.functional.relu(self.conv1(self.drop1(self.bn1(x))))
528
+
529
+ #Subclass for GNET
530
+ class PreFilter(nn.Module):
531
+ def __init__(self):
532
+ super(PreFilter, self).__init__()
533
+ self.conv1 = nn.Sequential(
534
+ nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
535
+ nn.ReLU(inplace=True),
536
+ nn.MaxPool2d(kernel_size=3, stride=2)
537
+ )
538
+ self.conv2 = nn.Sequential(
539
+ nn.Conv2d(64, 192, kernel_size=5, padding=2),
540
+ nn.ReLU(inplace=True)
541
+ )
542
+
543
+ def forward(self, x):
544
+ c1 = self.conv1(x)
545
+ y = self.conv2(c1)
546
+ return y
547
+
548
+ #Subclass for GNET
549
+ class EncStage(nn.Module):
550
+ def __init__(self, trunk_width=64, pass_through=64):
551
+ super(EncStage, self).__init__()
552
+ self.conv3 = nn.Conv2d(192, 128, kernel_size=3, stride=1, padding=0)
553
+ self.drop1 = nn.Dropout2d(p=0.5, inplace=False) ##
554
+ self.bn1 = nn.BatchNorm2d(192, eps=1e-05, momentum=0.25, affine=True, track_running_stats=True) ##
555
+ self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
556
+ ##
557
+ self.tw = int(trunk_width)
558
+ self.pt = int(pass_through)
559
+ ss = (self.tw + self.pt)
560
+ self.conv4a = TrunkBlock(128, ss)
561
+ self.conv5a = TrunkBlock(ss, ss)
562
+ self.conv6a = TrunkBlock(ss, ss)
563
+ self.conv4b = TrunkBlock(ss, ss)
564
+ self.conv5b = TrunkBlock(ss, ss)
565
+ self.conv6b = TrunkBlock(ss, self.tw)
566
+ ##
567
+ torch.nn.init.xavier_normal_(self.conv3.weight, gain=torch.nn.init.calculate_gain('relu'))
568
+ torch.nn.init.constant_(self.conv3.bias, 0.0)
569
+
570
+ def forward(self, x):
571
+ c3 = (torch.nn.functional.relu(self.conv3(self.drop1(self.bn1(x))), inplace=False))
572
+ c4a = self.conv4a(c3)
573
+ c4b = self.conv4b(c4a)
574
+ c5a = self.conv5a(self.pool1(c4b))
575
+ c5b = self.conv5b(c5a)
576
+ c6a = self.conv6a(c5b)
577
+ c6b = self.conv6b(c6a)
578
+
579
+ return [torch.cat([c3, c4a[:,:self.tw], c4b[:,:self.tw]], dim=1),
580
+ torch.cat([c5a[:,:self.tw], c5b[:,:self.tw], c6a[:,:self.tw], c6b], dim=1)], c6b
581
+
582
+ #Subclass for GNET
583
+ class GEncoder(nn.Module):
584
+ def __init__(self, mu, trunk_width, pass_through=64 ):
585
+ super(GEncoder, self).__init__()
586
+ self.mu = nn.Parameter(torch.from_numpy(mu), requires_grad=False) #.to(device)
587
+ self.pre = PreFilter()
588
+ self.enc = EncStage(trunk_width, pass_through)
589
+
590
+ def forward(self, x):
591
+ fmaps, h = self.enc(self.pre(x - self.mu))
592
+ return x, fmaps, h
593
+
594
+ #Main GNET model class
595
+ class Torch_LayerwiseFWRF(nn.Module):
596
+ def __init__(self, fmaps, nv=1, pre_nl=None, post_nl=None, dtype=np.float32):
597
+ super(Torch_LayerwiseFWRF, self).__init__()
598
+ self.fmaps_shapes = [list(f.size()) for f in fmaps]
599
+ self.nf = np.sum([s[1] for s in self.fmaps_shapes])
600
+ self.pre_nl = pre_nl
601
+ self.post_nl = post_nl
602
+ self.nv = nv
603
+ ##
604
+ self.rfs = []
605
+ self.sm = nn.Softmax(dim=1)
606
+ for k,fm_rez in enumerate(self.fmaps_shapes):
607
+ rf = nn.Parameter(torch.tensor(np.ones(shape=(self.nv, fm_rez[2], fm_rez[2]), dtype=dtype), requires_grad=True))
608
+ self.register_parameter('rf%d'%k, rf)
609
+ self.rfs += [rf,]
610
+ self.w = nn.Parameter(torch.tensor(np.random.normal(0, 0.01, size=(self.nv, self.nf)).astype(dtype=dtype), requires_grad=True))
611
+ self.b = nn.Parameter(torch.tensor(np.random.normal(0, 0.01, size=(self.nv,)).astype(dtype=dtype), requires_grad=True))
612
+
613
+ def forward(self, fmaps):
614
+ phi = []
615
+ for fm,rf in zip(fmaps, self.rfs): #, self.scales):
616
+ g = self.sm(torch.flatten(rf, start_dim=1))
617
+ f = torch.flatten(fm, start_dim=2) # *s
618
+ if self.pre_nl is not None:
619
+ f = self.pre_nl(f)
620
+ # fmaps : [batch, features, space]
621
+ # v : [nv, space]
622
+ phi += [torch.tensordot(g, f, dims=[[1],[2]]),] # apply pooling field and add to list.
623
+ # phi : [nv, batch, features]
624
+ Phi = torch.cat(phi, dim=2)
625
+ if self.post_nl is not None:
626
+ Phi = self.post_nl(Phi)
627
+ vr = torch.squeeze(torch.bmm(Phi, torch.unsqueeze(self.w,2))).t() + torch.unsqueeze(self.b,0)
628
+ return vr
629
+
630
+ class GNet8_Encoder():
631
+
632
+ def __init__(self, subject = 1, device = "cuda", model_path = "gnet_multisubject.pt"):
633
+
634
+ # Setting up Cuda
635
+ self.device = torch.device(device)
636
+ torch.backends.cudnn.enabled=True
637
+ # Subject number
638
+ self.subject = subject
639
+
640
+ # Vector type
641
+ self.vector = "images"
642
+
643
+ # x size
644
+ subject_sizes = [0, 15724, 14278, 15226, 13153, 13039, 17907, 12682, 14386]
645
+ self.x_size = subject_sizes[self.subject]
646
+
647
+ # Reload joined GNet model files
648
+ self.joined_checkpoint = torch.load(model_path, map_location=self.device)
649
+
650
+ self.subjects = list(self.joined_checkpoint['voxel_mask'].keys())
651
+ self.gnet8j_voxel_mask = self.joined_checkpoint['voxel_mask']
652
+ self.gnet8j_voxel_roi = self.joined_checkpoint['voxel_roi']
653
+ self.gnet8j_voxel_index= self.joined_checkpoint['voxel_index']
654
+ self.gnet8j_brain_nii_shape= self.joined_checkpoint['brain_nii_shape']
655
+ self.gnet8j_val_cc = self.joined_checkpoint['val_cc']
656
+
657
+
658
+
659
+ def load_image(self, image_path):
660
+
661
+ image = PIL.Image.open(image_path).convert('RGB')
662
+
663
+ w, h = 227, 227 # GNet expects 227x227 RGB inputs
664
+ imagePil = image.resize((w, h), resample=PIL.Image.Resampling.LANCZOS)
665
+ image = np.array(imagePil).astype(np.float32) / 255.0
666
+
667
+ return image
668
+
669
+ # Rebuild Model
670
+ def _model_fn(self, _ext, _con, _x):
671
+ '''model consists of an extractor (_ext) and a connection model (_con)'''
672
+ _y, _fm, _h = _ext(_x)
673
+ return _con(_fm)
674
+
675
+ def _pred_fn(self, _ext, _con, xb):
676
+ return self._model_fn(_ext, _con, torch.from_numpy(xb).to(self.device))
677
+
678
+ def subject_pred_pass(self, _pred_fn, _ext, _con, x, batch_size):
679
+ pred = _pred_fn(_ext, _con, x[:batch_size]) # this is just to get the shape
680
+ pred = np.zeros(shape=(len(x), pred.shape[1]), dtype=np.float32) # allocate
681
+ for rb,_ in utils.iterate_range(0, len(x), batch_size):
682
+ pred[rb] = utils.get_value(_pred_fn(_ext, _con, x[rb]))
683
+ return pred
684
+
685
+ def gnet8j_predictions(self, image_data, _pred_fn, trunk_width, pass_through, checkpoint, mask, batch_size, device=torch.device("cuda:0")):
686
+
687
+ subjects = list(image_data.keys())
688
+
689
+ if(mask is None):
690
+ subject_nv = {s: len(v) for s,v in checkpoint['val_cc'].items()}
691
+ else:
692
+ subject_nv = {s: len(v) for s,v in checkpoint['val_cc'].items()}
693
+ subject_nv[subjects[0]] = int(torch.sum(mask == True))
694
+
695
+ # allocate
696
+ subject_image_pred = {s: np.zeros(shape=(len(image_data[s]), subject_nv[s]), dtype=np.float32) for s in subjects}
697
+ # print(subject_image_pred)
698
+ _log_act_fn = lambda _x: torch.log(1 + torch.abs(_x))*torch.tanh(_x)
699
+
700
+ best_params = checkpoint['best_params']
701
+ # print(best_params)
702
+ shared_model = GEncoder(np.array(checkpoint['input_mean']).astype(np.float32), trunk_width=trunk_width, pass_through=pass_through).to(device)
703
+ shared_model.load_state_dict(best_params['enc'])
704
+ shared_model.eval()
705
+
706
+ # example fmaps
707
+ rec, fmaps, h = shared_model(torch.from_numpy(image_data[list(image_data.keys())[0]][:20]).to(device))
708
+ for s in subjects:
709
+ sd = Torch_LayerwiseFWRF(fmaps, nv=subject_nv[s], pre_nl=_log_act_fn, post_nl=_log_act_fn, dtype=np.float32).to(device)
710
+ params = best_params['fwrfs'][s]
711
+
712
+ if(mask is None):
713
+ sd.load_state_dict(params)
714
+
715
+ else:
716
+ masked_params = {}
717
+ for key, value in params.items():
718
+ masked_params[key] = value[mask]
719
+
720
+ sd.load_state_dict(masked_params)
721
+
722
+ # print(params['w'].shape)
723
+ # print(params['b'].shape)
724
+ # sd.load_state_dict(best_params['fwrfs'][s])
725
+ sd.eval()
726
+ # print(sd)
727
+
728
+ subject_image_pred[s] = self.subject_pred_pass(_pred_fn, shared_model, sd, image_data[s], batch_size)
729
+
730
+ return subject_image_pred
731
+
732
+ def predict(self, images, mask = None):
733
+ self.stim_data = {}
734
+ data = []
735
+ w, h = 227, 227 # GNet expects 227x227 RGB inputs
736
+
737
+ if(isinstance(images, list)):
738
+ for i in range(len(images)):
739
+
740
+ imagePil = images[i].convert("RGB").resize((w, h), resample=PIL.Image.Resampling.LANCZOS)
741
+ image = np.array(imagePil).astype(np.float32) / 255.0
742
+ data.append(image)
743
+
744
+ elif(isinstance(images, torch.Tensor)):
745
+ for i in range(images.shape[0]):
746
+
747
+ imagePil = utils.process_image(images[i], w, h)
748
+ image = np.array(imagePil).astype(np.float32) / 255.0
749
+ data.append(image)
750
+
751
+
752
+ self.stim_data[self.subject] = np.moveaxis(np.array(data), 3, 1)
753
+
754
+ gnet8j_image_pred = self.gnet8j_predictions(self.stim_data, self._pred_fn, 64, 192, self.joined_checkpoint, mask, batch_size=100, device=self.device)
755
+
756
+ return torch.from_numpy(gnet8j_image_pred[self.subject])
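A hypothetical usage sketch for `GNet8_Encoder`: the checkpoint path, subject index, and stimulus filenames are assumptions, and `gnet_multisubject.pt` must provide the keys read in `__init__` (`voxel_mask`, `val_cc`, `best_params`, `input_mean`, ...).

import PIL.Image
from models import GNet8_Encoder

gnet = GNet8_Encoder(subject=1, device="cuda", model_path="gnet_multisubject.pt")

# predict voxel responses for a list of PIL images (hypothetical filenames)
imgs = [PIL.Image.open(p) for p in ["stim_0001.png", "stim_0002.png"]]
pred_betas = gnet.predict(imgs)  # -> torch.Tensor of shape (n_images, n_voxels)
print(pred_betas.shape)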
recon_inference-multisession-simple.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
recon_inference-multisession.ipynb ADDED
@@ -0,0 +1,1689 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "f16c9d4c-66cb-4692-a61d-9aa86a8765d0",
7
+ "metadata": {
8
+ "tags": []
9
+ },
10
+ "outputs": [
11
+ {
12
+ "name": "stdout",
13
+ "output_type": "stream",
14
+ "text": [
15
+ "importing modules\n"
16
+ ]
17
+ }
18
+ ],
19
+ "source": [
20
+ "print(\"importing modules\")\n",
21
+ "import os\n",
22
+ "import sys\n",
23
+ "import json\n",
24
+ "import argparse\n",
25
+ "import numpy as np\n",
26
+ "import time\n",
27
+ "import random\n",
28
+ "import string\n",
29
+ "import h5py\n",
30
+ "from tqdm import tqdm\n",
31
+ "import webdataset as wds\n",
32
+ "from PIL import Image\n",
33
+ "import pandas as pd\n",
34
+ "import nibabel as nib\n",
35
+ "import nilearn\n",
36
+ "\n",
37
+ "import matplotlib.pyplot as plt\n",
38
+ "import torch\n",
39
+ "import torch.nn as nn\n",
40
+ "from torchvision import transforms\n",
41
+ "\n",
42
+ "# tf32 data type is faster than standard float32\n",
43
+ "torch.backends.cuda.matmul.allow_tf32 = True\n",
44
+ "\n",
45
+ "import utils\n",
46
+ "from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas\n",
47
+ "\n",
48
+ "# this block imports utils from mindeye_preproc as \"preproc\"\n",
49
+ "import importlib.util\n",
50
+ "parent_utils_path = \"/home/ri4541/mindeye_preproc/analysis/utils.py\"\n",
51
+ "spec = importlib.util.spec_from_file_location(\"utils\", parent_utils_path)\n",
52
+ "preproc = importlib.util.module_from_spec(spec)\n",
53
+ "parent_dir = os.path.dirname(parent_utils_path)\n",
54
+ "if parent_dir not in sys.path:\n",
55
+ " sys.path.append(parent_dir)\n",
56
+ "spec.loader.exec_module(preproc)\n",
57
+ "\n",
58
+ "if utils.is_interactive():\n",
59
+ " from IPython.display import clear_output # function to clear print outputs in cell\n",
60
+ " %load_ext autoreload \n",
61
+ " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n",
62
+ " %autoreload 2 "
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": 2,
68
+ "id": "33a4a539-7c94-4447-b3a4-9208c6af7920",
69
+ "metadata": {},
70
+ "outputs": [
71
+ {
72
+ "name": "stdout",
73
+ "output_type": "stream",
74
+ "text": [
75
+ "LOCAL RANK 0\n",
76
+ "device: cuda\n"
77
+ ]
78
+ }
79
+ ],
80
+ "source": [
81
+ "from accelerate import Accelerator, DeepSpeedPlugin\n",
82
+ "from generative_models.sgm.models.diffusion import DiffusionEngine\n",
83
+ "from omegaconf import OmegaConf\n",
84
+ "\n",
85
+ "import os\n",
86
+ "### Multi-GPU config ###\n",
87
+ "local_rank = os.getenv('RANK')\n",
88
+ "if local_rank is None: \n",
89
+ " local_rank = 0\n",
90
+ "else:\n",
91
+ " local_rank = int(local_rank)\n",
92
+ "print(\"LOCAL RANK \", local_rank) \n",
93
+ "\n",
94
+ "accelerator = Accelerator(split_batches=False, mixed_precision=\"fp16\")\n",
95
+ "device = accelerator.device\n",
96
+ "print(\"device:\",device)"
97
+ ]
98
+ },
99
+ {
100
+ "cell_type": "markdown",
101
+ "id": "7d2d8de1-d0ca-4b5f-84d8-2560f0399a5a",
102
+ "metadata": {},
103
+ "source": [
104
+ "# Data"
105
+ ]
106
+ },
107
+ {
108
+ "cell_type": "markdown",
109
+ "id": "84c47b5b-869f-468c-bb93-43610ee5dbe0",
110
+ "metadata": {},
111
+ "source": [
112
+ "## New Design"
113
+ ]
114
+ },
115
+ {
116
+ "cell_type": "code",
117
+ "execution_count": 3,
118
+ "id": "69037852-cdbd-4eac-a720-3fca5dc48a61",
119
+ "metadata": {},
120
+ "outputs": [],
121
+ "source": [
122
+ "if utils.is_interactive():\n",
123
+ " sub = \"sub-005\"\n",
124
+ " session = \"ses-03\"\n",
125
+ " task = 'C' # 'study' or 'A'; used to search for functional run in bids format\n",
126
+ "else:\n",
127
+ " sub = os.environ[\"sub\"]\n",
128
+ " session = os.environ[\"session\"]\n",
129
+ " task = os.environ[\"task\"]\n",
130
+ "\n",
131
+ "if session == \"all\":\n",
132
+ " ses_list = [\"ses-01\", \"ses-02\"] # list of actual session IDs\n",
133
+ " design_ses_list = [\"ses-01\", \"ses-02\"] # list of session IDs to search for design matrix\n",
134
+ "else:\n",
135
+ " ses_list = [session]\n",
136
+ " design_ses_list = [session]\n",
137
+ " \n",
138
+ "task_name = f\"_task-{task}\" if task != 'study' else ''\n",
139
+ "resample_voxel_size = False\n",
140
+ "resample_post_glmsingle = False # do you want to do voxel resampling here? if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal\n",
141
+ "load_from_resampled_file = False # do you want to load resampled data from file? if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data\n",
142
+ " \n",
143
+ "train_test_split = 'MST' # 'MST', 'orig', 'unique'\n",
144
+ "remove_close_to_MST = False\n",
145
+ "remove_random_n = False\n",
146
+ "\n",
147
+ "if remove_close_to_MST or remove_random_n:\n",
148
+ " assert remove_close_to_MST != remove_random_n # don't remove both sets of images\n",
149
+ "\n",
150
+ "n_to_remove = 0\n",
151
+ "if remove_random_n:\n",
152
+ " assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set\n",
153
+ " n_to_remove = 150\n",
154
+ " \n",
155
+ "if resample_voxel_size:\n",
156
+ " # voxel size was unchanged in glmsingle, want to perform resampling here\n",
157
+ " resampled_vox_size = 2.5\n",
158
+ " resample_method = \"sinc\" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html\n",
159
+ " \n",
160
+ " # file name helper variables\n",
161
+ " vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore\n",
162
+ " resampled_suffix = f\"resampled_{vox_dim_str}mm_{resample_method}\"\n",
163
+ " mask_resampled_suffix = resampled_suffix\n",
164
+ " if resample_post_glmsingle:\n",
165
+ " resampled_suffix += '_postglmsingle'\n",
166
+ " else:\n",
167
+ " resampled_suffix += '_preglmsingle'"
168
+ ]
169
+ },
170
+ {
171
+ "cell_type": "code",
172
+ "execution_count": 4,
173
+ "id": "2ece766e-4272-4ca3-81e9-9ea5dccd2279",
174
+ "metadata": {},
175
+ "outputs": [
176
+ {
177
+ "name": "stdout",
178
+ "output_type": "stream",
179
+ "text": [
180
+ "session label: ses-03\n"
181
+ ]
182
+ }
183
+ ],
184
+ "source": [
185
+ "session_label = preproc.get_session_label(ses_list)\n",
186
+ "print('session label:', session_label)\n",
187
+ "n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)"
188
+ ]
189
+ },
190
+ {
191
+ "cell_type": "code",
192
+ "execution_count": 6,
193
+ "id": "e52985b1-95ff-487b-8b2d-cc1ad1c190b8",
194
+ "metadata": {
195
+ "tags": []
196
+ },
197
+ "outputs": [
198
+ {
199
+ "name": "stdout",
200
+ "output_type": "stream",
201
+ "text": [
202
+ "model_name: sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0\n",
203
+ "glmsingle_path: /scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C\n",
204
+ "glmsingle path exists!\n",
205
+ "--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --glmsingle_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C --model_name=sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0 --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4\n",
206
+ "The autoreload extension is already loaded. To reload it, use:\n",
207
+ " %reload_ext autoreload\n"
208
+ ]
209
+ }
210
+ ],
211
+ "source": [
212
+ "# if running this interactively, can specify jupyter_args here for argparser to use\n",
213
+ "if utils.is_interactive():\n",
214
+ " # model_name=f\"{sub}_{session}_task-{task}_bs24_MST_rishab_{train_test_split}split\"\n",
215
+ " model_name = \"sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0\"\n",
216
+ " print(\"model_name:\", model_name)\n",
217
+ " glmsingle_path = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}\"\n",
218
+ " print(\"glmsingle_path:\", glmsingle_path)\n",
219
+ " assert os.path.exists(glmsingle_path)\n",
220
+ " print(\"glmsingle path exists!\")\n",
221
+ " # global_batch_size and batch_size should already be defined in the above cells\n",
222
+ " # other variables can be specified in the following string:\n",
223
+ " jupyter_args = f\"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \\\n",
224
+ " --glmsingle_path={glmsingle_path} \\\n",
225
+ " --model_name={model_name} --subj=1 \\\n",
226
+ " --no-blurry_recon --use_prior \\\n",
227
+ " --hidden_dim=1024 --n_blocks=4\"\n",
228
+ " \n",
229
+ " print(jupyter_args)\n",
230
+ " jupyter_args = jupyter_args.split()\n",
231
+ " \n",
232
+ " from IPython.display import clear_output # function to clear print outputs in cell\n",
233
+ " %load_ext autoreload \n",
234
+ " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n",
235
+ " %autoreload 2 "
236
+ ]
237
+ },
238
+ {
239
+ "cell_type": "code",
240
+ "execution_count": 7,
241
+ "id": "49e5dae4-606d-4dc6-b420-df9e4c14737e",
242
+ "metadata": {
243
+ "tags": []
244
+ },
245
+ "outputs": [],
246
+ "source": [
247
+ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n",
248
+ "parser.add_argument(\n",
249
+ " \"--model_name\", type=str, default=\"testing\",\n",
250
+ " help=\"will load ckpt for model found in ../train_logs/model_name\",\n",
251
+ ")\n",
252
+ "parser.add_argument(\n",
253
+ " \"--data_path\", type=str, default=\"/weka/proj-fmri/shared/mindeyev2_dataset\",\n",
254
+ " help=\"Path to where NSD data is stored / where to download it to\",\n",
255
+ ")\n",
256
+ "parser.add_argument(\n",
257
+ " \"--subj\",type=int, default=1, choices=[1,2,3,4,5,6,7,8],\n",
258
+ " help=\"Validate on which subject?\",\n",
259
+ ")\n",
260
+ "parser.add_argument(\n",
261
+ " \"--blurry_recon\",action=argparse.BooleanOptionalAction,default=True,\n",
262
+ ")\n",
263
+ "parser.add_argument(\n",
264
+ " \"--use_prior\",action=argparse.BooleanOptionalAction,default=False,\n",
265
+ " help=\"whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)\",\n",
266
+ ")\n",
267
+ "parser.add_argument(\n",
268
+ " \"--clip_scale\",type=float,default=1.,\n",
269
+ ")\n",
270
+ "parser.add_argument(\n",
271
+ " \"--n_blocks\",type=int,default=4,\n",
272
+ ")\n",
273
+ "parser.add_argument(\n",
274
+ " \"--hidden_dim\",type=int,default=2048,\n",
275
+ ")\n",
276
+ "parser.add_argument(\n",
277
+ " \"--new_test\",action=argparse.BooleanOptionalAction,default=True,\n",
278
+ ")\n",
279
+ "parser.add_argument(\n",
280
+ " \"--seq_len\",type=int,default=1,\n",
281
+ ")\n",
282
+ "parser.add_argument(\n",
283
+ " \"--seed\",type=int,default=42,\n",
284
+ ")\n",
285
+ "parser.add_argument(\n",
286
+ " \"--glmsingle_path\",type=str,default=\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_ses-01\",\n",
287
+ ")\n",
288
+ "if utils.is_interactive():\n",
289
+ " args = parser.parse_args(jupyter_args)\n",
290
+ "else:\n",
291
+ " args = parser.parse_args()\n",
292
+ "\n",
293
+ "# create global variables without the args prefix\n",
294
+ "for attribute_name in vars(args).keys():\n",
295
+ " globals()[attribute_name] = getattr(args, attribute_name)\n",
296
+ " \n",
297
+ "# make output directory\n",
298
+ "# os.makedirs(\"evals\",exist_ok=True)\n",
299
+ "# os.makedirs(f\"evals/{model_name}\",exist_ok=True)"
300
+ ]
301
+ },
302
+ {
303
+ "cell_type": "code",
304
+ "execution_count": 8,
305
+ "id": "34c1e0c6-0641-4239-8201-f2c676532302",
306
+ "metadata": {},
307
+ "outputs": [
308
+ {
309
+ "name": "stdout",
310
+ "output_type": "stream",
311
+ "text": [
312
+ "csv/sub-005_ses-03.csv\n",
313
+ "(785, 126)\n",
314
+ "len_unique_images 532\n",
315
+ "n_runs 11\n",
316
+ "['all_stimuli/unchosen_nsd_1000_images/unchosen_7211_cocoid_59250.png'\n",
317
+ " 'all_stimuli/special515/special_67295.jpg'\n",
318
+ " 'all_stimuli/unchosen_nsd_1000_images/unchosen_5729_cocoid_53029.png'\n",
319
+ " 'all_stimuli/special515/special_70232.jpg']\n",
320
+ "[174.7109683 178.7049172 182.7072832 186.7297016]\n",
321
+ "[0. 0. 0. 0.]\n",
322
+ "(693,)\n"
323
+ ]
324
+ }
325
+ ],
326
+ "source": [
327
+ "if session == \"all\":\n",
328
+ " filename = f\"csv/{sub}_{ses_list[0]}.csv\"\n",
329
+ " data = pd.read_csv(filename)[14:]\n",
330
+ " print(filename)\n",
331
+ " print(data.shape)\n",
332
+ " for s in ses_list[1:]:\n",
333
+ " filename = f\"csv/{sub}_{s}.csv\"\n",
334
+ " print(filename)\n",
335
+ " data = pd.concat([data, pd.read_csv(filename)[14:]])\n",
336
+ " print(data.shape)\n",
337
+ "else:\n",
338
+ " filename = f\"csv/{sub}_{session}.csv\"\n",
339
+ " if sub == 'sub-001' and session == 'ses-01':\n",
340
+ " data = pd.read_csv(filename)[23:]\n",
341
+ " else: \n",
342
+ " data = pd.read_csv(filename)[14:]\n",
343
+ " print(filename)\n",
344
+ " print(data.shape)\n",
345
+ "\n",
346
+ "image_names = data['current_image'].values\n",
347
+ "starts = data['trial.started'].values\n",
348
+ "is_new_run = data['is_new_run'].values\n",
349
+ "\n",
350
+ "if sub == 'sub-001':\n",
351
+ " if session == 'ses-01':\n",
352
+ " assert image_names[0] == 'images/image_686_seed_1.png'\n",
353
+ " elif session in ('ses-02', 'all'):\n",
354
+ " assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'\n",
355
+ " elif session == 'ses-03':\n",
356
+ " assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'\n",
357
+ " elif session == 'ses-04':\n",
358
+ " assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'\n",
359
+ "elif sub == 'sub-003':\n",
360
+ " assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'\n",
361
+ "\n",
362
+ "unique_images = np.unique(image_names.astype(str))\n",
363
+ "unique_images = unique_images[(unique_images!=\"nan\")]\n",
364
+ "# unique_images = unique_images[(unique_images!=\"blank.jpg\")]\n",
365
+ "len_unique_images = len(unique_images)\n",
366
+ "print(\"len_unique_images\",len_unique_images)\n",
367
+ "print(\"n_runs\",n_runs)\n",
368
+ "\n",
369
+ "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n",
370
+ " assert len(unique_images) == 851\n",
371
+ "\n",
372
+ "print(image_names[:4])\n",
373
+ "print(starts[:4])\n",
374
+ "print(is_new_run[:4])\n",
375
+ "\n",
376
+ "if remove_random_n:\n",
377
+ " # want to remove 150 imgs\n",
378
+ " # 100 special515 imgs are repeated 3x (300 total)\n",
379
+ " # all other train imgs are only shown once (558 total)\n",
380
+ " # of the 150, want to sample proportionally since we're cutting all repeats for special515\n",
381
+ " # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total\n",
382
+ " np.random.seed(seed)\n",
383
+ " options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)\n",
384
+ " options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)\n",
385
+ " imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False)\n",
386
+ " imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))\n",
387
+ "\n",
388
+ "image_idx = np.array([]) # contains the unique index of each presented image\n",
389
+ "vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx\n",
390
+ "all_MST_images = dict()\n",
391
+ "for i, im in enumerate(image_names):\n",
392
+ " # skip if blank, nan\n",
393
+ " if im == \"blank.jpg\":\n",
394
+ " i+=1\n",
395
+ " continue\n",
396
+ " if str(im) == \"nan\":\n",
397
+ " i+=1\n",
398
+ " continue\n",
399
+ " vox_image_names = np.append(vox_image_names, im)\n",
400
+ " if remove_close_to_MST: # optionally skip close_to_MST images \n",
401
+ " if \"closest_pairs\" in im:\n",
402
+ " i+=1\n",
403
+ " continue\n",
404
+ " elif remove_random_n:\n",
405
+ " if im in imgs_to_remove:\n",
406
+ " i+=1\n",
407
+ " continue\n",
408
+ " \n",
409
+ " image_idx_ = np.where(im==unique_images)[0].item()\n",
410
+ " image_idx = np.append(image_idx, image_idx_)\n",
411
+ " \n",
412
+ " if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles\n",
413
+ " import re\n",
414
+ " if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\\d{1,2}_\\d{1,3}\\.png$', im) or re.match(r'images/\\d{1,2}_\\d{1,3}\\.png$', im)): \n",
415
+ " # the regexp here looks for **_***.png, allows 1-2 chars before underscore and 1-3 chars after it\n",
416
+ " # print(im)\n",
417
+ " all_MST_images[i] = im\n",
418
+ " i+=1 \n",
419
+ " elif 'MST' in im:\n",
420
+ " all_MST_images[i] = im\n",
421
+ " i+=1\n",
422
+ " \n",
423
+ "image_idx = torch.Tensor(image_idx).long()\n",
424
+ "# for im in new_image_names[MST_images]:\n",
425
+ "# assert 'MST_pairs' in im\n",
426
+ "# assert len(all_MST_images) == 300\n",
427
+ "\n",
428
+ "unique_MST_images = np.unique(list(all_MST_images.values())) \n",
429
+ "\n",
430
+ "MST_ID = np.array([], dtype=int)\n",
431
+ "if remove_close_to_MST:\n",
432
+ " close_to_MST_idx = np.array([], dtype=int)\n",
433
+ "if remove_random_n:\n",
434
+ " random_n_idx = np.array([], dtype=int)\n",
435
+ "\n",
436
+ "vox_idx = np.array([], dtype=int)\n",
437
+ "j=0 # this is a counter keeping track of the remove_random_n used later to index vox based on the removed images; unused otherwise\n",
438
+ "for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure\n",
439
+ " # skip if blank, nan\n",
440
+ " if im == \"blank.jpg\":\n",
441
+ " i+=1\n",
442
+ " continue\n",
443
+ " if str(im) == \"nan\":\n",
444
+ " i+=1\n",
445
+ " continue\n",
446
+ " if remove_close_to_MST: # optionally skip close_to_MST images \n",
447
+ " if \"closest_pairs\" in im:\n",
448
+ " close_to_MST_idx = np.append(close_to_MST_idx, i)\n",
449
+ " i+=1\n",
450
+ " continue\n",
451
+ " if remove_random_n:\n",
452
+ " if im in imgs_to_remove:\n",
453
+ " vox_idx = np.append(vox_idx, j)\n",
454
+ " i+=1\n",
455
+ " j+=1\n",
456
+ " continue\n",
457
+ " j+=1\n",
458
+ " curr = np.where(im == unique_MST_images)\n",
459
+ " # print(curr)\n",
460
+ " if curr[0].size == 0:\n",
461
+ " MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later\n",
462
+ " else:\n",
463
+ " MST_ID = np.append(MST_ID, curr)\n",
464
+ " \n",
465
+ "assert len(MST_ID) == len(image_idx)\n",
466
+ "# assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)\n",
467
+ "# MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor\n",
468
+ "print(MST_ID.shape)\n",
469
+ "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n",
470
+ " assert len(all_MST_images) == 100"
471
+ ]
472
+ },
473
+ {
474
+ "cell_type": "code",
475
+ "execution_count": 9,
476
+ "id": "dd08fa34-ebd0-482a-bc29-8fb32c8b888b",
477
+ "metadata": {},
478
+ "outputs": [],
479
+ "source": [
480
+ "# unique_images_pairs = [\n",
481
+ "# (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),\n",
482
+ "# (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),\n",
483
+ "# (31,32),(33,34),(35,36),\n",
484
+ "# (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),\n",
485
+ "# (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),\n",
486
+ "# (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),\n",
487
+ "# (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),\n",
488
+ "# (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),\n",
489
+ "# (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),\n",
490
+ "# (847, 848), (849, 850)\n",
491
+ "# ]\n",
492
+ "# unique_images[unique_images_pairs]"
493
+ ]
494
+ },
495
+ {
496
+ "cell_type": "code",
497
+ "execution_count": 10,
498
+ "id": "59bc3b21-e29d-4d2b-8223-cd704e3f058a",
499
+ "metadata": {
500
+ "tags": []
501
+ },
502
+ "outputs": [
503
+ {
504
+ "name": "stderr",
505
+ "output_type": "stream",
506
+ "text": [
507
+ " 0%| | 1/693 [00:00<03:38, 3.16it/s]/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
508
+ " warnings.warn(\n",
509
+ "100%|██████████| 693/693 [00:33<00:00, 20.78it/s]"
510
+ ]
511
+ },
512
+ {
513
+ "name": "stdout",
514
+ "output_type": "stream",
515
+ "text": [
516
+ "images torch.Size([693, 3, 224, 224])\n",
517
+ "MST_images 693\n",
518
+ "MST_images==True 124\n"
519
+ ]
520
+ },
521
+ {
522
+ "name": "stderr",
523
+ "output_type": "stream",
524
+ "text": [
525
+ "\n"
526
+ ]
527
+ }
528
+ ],
529
+ "source": [
530
+ "import imageio.v2 as imageio\n",
531
+ "resize_transform = transforms.Resize((224, 224))\n",
532
+ "MST_images = []\n",
533
+ "images = None\n",
534
+ "for im_name in tqdm(image_idx):\n",
535
+ " if sub == 'sub-001' and session == 'ses-01':\n",
536
+ " image_file = f\"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}\"\n",
537
+ " else:\n",
538
+ " image_file = f\"{unique_images[im_name]}\"\n",
539
+ " im = imageio.imread(image_file)\n",
540
+ " im = torch.Tensor(im / 255).permute(2,0,1)\n",
541
+ " im = resize_transform(im.unsqueeze(0))\n",
542
+ " if images is None:\n",
543
+ " images = im\n",
544
+ " else:\n",
545
+ " images = torch.vstack((images, im))\n",
546
+ " if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n",
547
+ " if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\\d{1,2}_\\d{1,3}\\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\\d{1,2}_\\d{1,3}\\.png$', image_file)): \n",
548
+ " MST_images.append(True)\n",
549
+ " else:\n",
550
+ " MST_images.append(False)\n",
551
+ " else: \n",
552
+ " if (\"MST_pairs\" in image_file): # (\"_seed_\" not in unique_images[im_name]) and (unique_images[im_name] != \"blank.jpg\") \n",
553
+ " MST_images.append(True)\n",
554
+ " else:\n",
555
+ " MST_images.append(False)\n",
556
+ "\n",
557
+ "print(\"images\", images.shape)\n",
558
+ "MST_images = np.array(MST_images)\n",
559
+ "print(\"MST_images\", len(MST_images))\n",
560
+ "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n",
561
+ " assert len(MST_images[MST_images==True]) == 100\n",
562
+ "print(\"MST_images==True\", len(MST_images[MST_images==True]))"
563
+ ]
564
+ },
565
+ {
566
+ "cell_type": "code",
567
+ "execution_count": 11,
568
+ "id": "6f440a02-dd8a-4a13-9c90-bd07253f6910",
569
+ "metadata": {},
570
+ "outputs": [],
571
+ "source": [
572
+ "pairs = utils.find_paired_indices(image_idx)\n",
573
+ "pairs = sorted(pairs, key=lambda x: x[0])"
574
+ ]
575
+ },
576
+ {
577
+ "cell_type": "code",
578
+ "execution_count": 12,
579
+ "id": "c5f61515-d4fa-419b-b945-cdedc8f24669",
580
+ "metadata": {
581
+ "tags": []
582
+ },
583
+ "outputs": [
584
+ {
585
+ "name": "stdout",
586
+ "output_type": "stream",
587
+ "text": [
588
+ "vox (693, 1, 1, 183408)\n",
589
+ "vox (693, 183408)\n"
590
+ ]
591
+ }
592
+ ],
593
+ "source": [
594
+ "vox = None\n",
595
+ "needs_postprocessing = False\n",
596
+ "params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx)\n",
597
+ "\n",
598
+ "if resample_post_glmsingle == True:\n",
599
+ " glm_save_path_resampled = f\"{glmsingle_path}/vox_resampled.nii.gz\"\n",
600
+ " if load_from_resampled_file == True:\n",
601
+ " # resampling was done in this notebook so we can load from file\n",
602
+ " vox = nib.load(glm_save_path_resampled)\n",
603
+ " else:\n",
604
+ " # do resampling here\n",
605
+ " assert os.path.exists(ref_name) and os.path.exists(omat_name), \"need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs\"\n",
606
+ " vox = load_preprocess_betas(orig_glmsingle_path, *params)\n",
607
+ " vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name)\n",
608
+ " needs_postprocessing = True\n",
609
+ "\n",
610
+ "if vox is None:\n",
611
+ " # either resampling was done in glmsingle or we aren't resampling \n",
612
+ " vox = load_preprocess_betas(glmsingle_path, *params)\n",
613
+ "\n",
614
+ "if needs_postprocessing == True:\n",
615
+ " vox = apply_mask(vox, avg_mask)\n",
616
+ " vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images)\n",
617
+ " print(vox.shape)\n",
618
+ "\n",
619
+ "assert len(vox) == len(image_idx)"
620
+ ]
621
+ },
622
+ {
623
+ "cell_type": "code",
624
+ "execution_count": 13,
625
+ "id": "a4675ba2-b27c-48db-893c-d81f978ba93b",
626
+ "metadata": {},
627
+ "outputs": [
628
+ {
629
+ "name": "stdout",
630
+ "output_type": "stream",
631
+ "text": [
632
+ "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C/sub-005_ses-03_task-C_brain.nii.gz\n",
633
+ "Mask dimensions: (2.0, 2.0, 2.0)\n",
634
+ "\n",
635
+ "Affine:\n",
636
+ "[[ 2. 0. 0. -76.29234314]\n",
637
+ " [ 0. 2. 0. -84.79180908]\n",
638
+ " [ 0. 0. 2. -62.80359268]\n",
639
+ " [ 0. 0. 0. 1. ]]\n",
640
+ "\n",
641
+ "There are 183408 voxels in the included brain mask\n",
642
+ "\n"
643
+ ]
644
+ }
645
+ ],
646
+ "source": [
647
+ "from nilearn.plotting import plot_roi, plot_anat, plot_epi\n",
648
+ "\n",
649
+ "mask_name = f'{glmsingle_path}/{sub}_{session_label}{task_name}_brain'\n",
650
+ "if resample_voxel_size:\n",
651
+ " if resample_post_glmsingle is True:\n",
652
+ " # use original mask directory\n",
653
+ " mask_in_name = f'{orig_glmsingle_path}/{sub}_{session}{task_name}_brain.nii.gz'\n",
654
+ " mask_out_name = mask_name + f\"_{mask_resampled_suffix}.nii.gz\"\n",
655
+ " assert os.path.exists(mask_in_name)\n",
656
+ " applyxfm(mask_in_name, ref_name, omat_name, resample_method, output=mask_out_name)\n",
657
+ " apply_thresh(mask_out_name, 0.5, output=mask_out_name) # binarize the mask since resampling can result in non- 0 or 1 values\n",
658
+ " mask_name += f\"_{mask_resampled_suffix}\"\n",
659
+ "\n",
660
+ "mask_name += \".nii.gz\"\n",
661
+ "print(mask_name)\n",
662
+ "avg_mask = nib.load(mask_name)\n",
663
+ "# mask info\n",
664
+ "dimsize=avg_mask.header.get_zooms()\n",
665
+ "affine_mat = avg_mask.affine\n",
666
+ "brain=avg_mask.get_fdata()\n",
667
+ "xyz=brain.shape #xyz dimensionality of brain mask and epi data\n",
668
+ "\n",
669
+ "print('Mask dimensions:', dimsize)\n",
670
+ "print('')\n",
671
+ "print('Affine:')\n",
672
+ "print(affine_mat)\n",
673
+ "print('')\n",
674
+ "print(f'There are {int(np.sum(brain))} voxels in the included brain mask\\n')"
675
+ ]
676
+ },
677
+ {
678
+ "cell_type": "code",
679
+ "execution_count": 14,
680
+ "id": "8a5573cf-19b5-40e6-b21c-883e762f5f35",
681
+ "metadata": {},
682
+ "outputs": [
683
+ {
684
+ "name": "stdout",
685
+ "output_type": "stream",
686
+ "text": [
687
+ "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C/sub-005_ses-03_task-C_nsdgeneral.nii.gz\n",
688
+ "nsdgeneral path exists!\n"
689
+ ]
690
+ }
691
+ ],
692
+ "source": [
693
+ "nsdgeneral_path = f'{glmsingle_path}/{sub}_{session_label}{task_name}_nsdgeneral.nii.gz' \n",
694
+ "print(nsdgeneral_path)\n",
695
+ "assert os.path.exists(nsdgeneral_path)\n",
696
+ "print(f\"nsdgeneral path exists!\")"
697
+ ]
698
+ },
699
+ {
700
+ "cell_type": "code",
701
+ "execution_count": 15,
702
+ "id": "b940e5dc-ac25-4f48-9764-6030cf18ff1e",
703
+ "metadata": {},
704
+ "outputs": [],
705
+ "source": [
706
+ "if resample_voxel_size:\n",
707
+ " nsdgeneral_path = f'{glmsingle_path}/{sub}_task-{task}_nsdgeneral_resampled.nii.gz' \n",
708
+ " if resample_post_glmsingle:\n",
709
+ " assert os.path.exists(orig_glmsingle_path)\n",
710
+ " roi_in_path = f\"{orig_glmsingle_path}/{sub}_task-{task}_nsdgeneral.nii.gz\" # the input file is the original nsdgeneral mask (without resampling), from the original glmsingle directory\n",
711
+ " applyxfm(roi_in_path, ref_name, omat_name, resample_method, output=nsdgeneral_path)"
712
+ ]
713
+ },
714
+ {
715
+ "cell_type": "code",
716
+ "execution_count": 16,
717
+ "id": "a3187c14-13df-4e51-915c-bb866eec413f",
718
+ "metadata": {},
719
+ "outputs": [
720
+ {
721
+ "name": "stdout",
722
+ "output_type": "stream",
723
+ "text": [
724
+ "(76, 90, 74)\n"
725
+ ]
726
+ },
727
+ {
728
+ "data": {
729
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqgAAAFyCAYAAAA59SiIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAek0lEQVR4nO3dfYxU9bkH8AcUHQqKBlxXoAU0Xo2CYn3DdqNNb1utbSyxNl6LLYgmjWijsdj6RjE12lrTGPpmrVotVVODUkwt2FRaX9aKSCuK1pfaCxFEXLFoRV2L7t4/vLsOZdedGWbm/M7M55NMMswMZ35z9pzZ7z7P+Z0zKCK6AwAAEjE46wEAAEAxARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAKApFAqFKBQKWQ+DEgioAEDDKxQK0d7eHu3t7UJqDgioAAAkRUAFgERMnz49uru749BDD816KDSwnu2s57Zly5ZYt25d3HjjjTF69OishxcRETtmPQAAAOpvzpw5sXr16igUCjFlypSYMWNGtLW1xcSJE+Ptt9/OdGwCKgBAE1qyZEn85S9/iYiIG264ITZu3BgXXHBBnHDCCbFgwYJMx6bFDwBAPPDAAxERsc8++2Q8EgEVAICIGD9+fEREbNq0KduBhBY/AEBTGjFiRIwcOTIKhUIceeSRMXfu3Ojs7Iy77ror66EJqAAAzWjp0qVb/Xv16tVx6qmnxgsvvJDRiN4noAIANKFZs2bFs88+GyNGjIiZM2fG0Ucfnfns/R4CKgBAE1q+fHnvLP5FixZFe3t73HrrrbHffvvFG2+8kenYTJICAGhyXV1dceGFF8aYMWPi7LPPzno4AioAABH33XdfPPzww3HuuefGzjvvnOlYtPgBIDEzZ86M4447bpvH582bF5s3b85gRDSLq666Km6//faYMWNGXHvttZmNQ0AFgMTMmjWrz8dvuukmAZWaWrhwYTz33HMxe/bsuO6666KrqyuTcQyKiO5M3hkAoE4KhUK0t7dHRERbW1t0dnZmPCI+iAoqAJCkxYsXV21ZJ554YtWWRe2ZJAUAQFIEVAAAkuIY1AZXKBQiIhxrw3azLQGlqGZbvtpaWloiIqKjo6Nqyzz++OOrtizep4LawHoOCG9vb+8NF1AJ2xIA9SSgAgCQlMxn8U+fPj1uuummOOyww3qvBwvV1rOd9XjnnXfipZdeij/84Q9x8cUXx/r167MbXBNLtRWoZQelSXUfrqdS1oHvlPJlHlChnubMmROrV6+OQqEQU6ZMiRkzZkRbW1tMnDgx3n777ayHBwCEgEqTWbJkSW+l/oYbboiNGzfGBRdcECeccEIsWLAg49EBABECKk3ugQceiAsuuCD22WefrIfS8PLUCszTWKtB+5FyNNv+UQ0DrTP74LZMkqKpjR8/PiIiNm3alO1AAIBeKqg0lREjRsTIkSOjUCjEkUceGXPnzo3Ozs646667sh5arqmo5JtJHvTFfl0/fa3rZt/nBFSaytKlS7f69+rVq+PUU0+NF154IaMRAQD/SUClqcyaNSueffbZGDFiRMycOTOOPvpos/cBIDECKk1l+fLlvbP4Fy1aFO3t7XHrrbfGfvvtF2+88UbGo8sfLcDm0fOzbva2Y57YP/Ot+OfXjPudSVI0ra6urrjwwgtjzJgxcfbZZ2c9HADg/wmoNLX77rsvHn744Tj33HNj5513zno4AEAk1OKfOXNmHHfccds8Pm/evNi8eXMGI6JZXHXVVXH77bfHjBkz4tprr816OMlbuHBh1kMgQ/21jZuxBZkibf3G1Iyz/JMJqLNmzerz8ZtuuklApaYWLlwYzz33XMyePTuuu+666OrqynpIANDUBkVEd9aDoDYKhUK0t7dHRERbW1t0dnZmPCLyqnhb6ujoyHg0pKjRqzl5oYL6wVpaWiKi8b7HGnH/S6aCyntqdcLsStuyjbjRA9XX7DOOgeoySQoAgKRo8WeoHq2YWrQzVEeaz+LFixu2NUZ9+N6oLa390jTD91ij7GsqqAAAJEVABQAgKVr8dZBl66XW7YxGaSXwvv6212ZojZEG3yul0dYvX7N9j+V5X1JBBQAgKQIqAABJcR7UKmu2lstAnzfP7QUgG86pCtXRsy/lcT9SQQUAICkqqBVqtkpppVRC8sH2TKryXAECKqeCCgBAUgRUAACS4jyoJchz+zMP53zTustGudt1HrYlmk+zfX/k+fdRCnyP5WefUUEFACApAioAAEnR4i/SiK2TPLUz8tJ2yLtKt/M8bUs0p2b4DmnE31P15HtsaynvMyqoAAAkRUAFACApTtQfWiapcFJ/AKiflH/vqqACAJCUpq2gqpqmLeW/6vLI9k4zcFlUaBwqqAAAJEVABQAgKU3V4tfmrL6PTvpon4//ddVfq/Ye2naVsb1DY7AvUw+p/a5VQQUAICkCKgAASWmKFr/2SHX119Yv5zWVHAJgZv/AbOvQ/37gewPyQwUVAICkCKgAACSlYVv8Wp2VK6WFX8/36OtwAO3+rdneofHYr8lCKr9fVVABAEhKQ1VQU/1rs7haWM3zg1ZTPaqmtZLKX3tA2nxXQH6ooAIAkBQBFQCApOS+xZ9CW7+cy32m2u4vHkut2v31eA+AUqR2WcceKfxOgx5ZHhajggoAQFIEVAAAkpLbFn9KbZD+WtcDtbGbud1fzvv23C9lHaXatgMASqeCCgBAUgRUAACSktsWP/VRzXZ/OYc8bK9mOCF3Soe5AEA1qaACAJCUXFVQU6oYNeN5PHuqqfX47ClNGgOopZR+t0F/6t2ZVEEFACApAioAAEnJVYu/nrSxAQCyoYIKAEBSBFQAAJKSfIu/kWc3pnqp04H0N9ZmPLNBvfW3P5Sz7vO0rUGjauTfbTS+elxWXAUVAICkJF9BbWSNVsmq5lWnGJh1DECjUkEFACApAioAAElJssWf1cHjtWqZNlorvx76mkBWys+nr3Vd78uzVVvx+LX1AWgGKqgAACRFQAUAIClJtvizYhZ6Oqr5s8hjW7+W+lqfDkOB+nD+UyiNCioAAEkRUAEASIoWPzVRzsz7vtT
7/6WsET8TAPlXy7PkqKACAJAUFdQaytPEk77OO9ooNqzd0Hu/9cOtGY4EACiFCioAAEkRUAEASErTtvhr1dLOW3u8VhNwUp3Ys2jBot77U780NbNxAAD9U0EFACApAioAAElJvsVfjVZ8qe3mStvSeWvrN7M8tfVdEhFqxyWQIW0qqAAAJEVABQAgKcm3+IsN1O4vp0W/vW35PLf1U51hXyvLli3Legi50tf2keftHYD8UUEFACApSVZQiw9eL75MZTU1w4SoyRMnx/qO9b33Bw/y90ie1LPSPdB7lbMt5WkfAaA6iif2VmMSosQCAEBSBFQAAJKSZIu/FM020accPeumq7ur97GVT6ys6xi2dxIbQLU59ynkhwoqAABJEVABAEhKblv81dQIs45LmYVt5nX6Fi1YlPUQoKFo60M+qaACAJAUFVQiYuCrdDWK4grl1C9NzWwcjaZ4mxmomt/I2xdpUDWF/FNBBQAgKQIqAABJ0eKP91uSeWk95vV8ouW0gWtFW782SjlEJK/bLfmhtQ+NQwUVAICkCKgAACSl5BZ/oVCo5Tj6VXy5zlqp1WVAJ0+cXPLyy3ltzzopfm3P/+/rdf95vy8pXAq1r89QDf/zlf/pvZ/VdlyqQYMG9d6vx7ZfqnK2pf5+jnk5hIZ8OfHEE3vvb+/+3dnZub3D6dPixYtrslxoZIMioruUF65YsaLGQwGA7LS1tdUkpAqo6WhpaYmIiI6OjoxH0tiqcTy4Fj8AAEkpucXf1tZWy3H0a+HChb33a9UCrmZ7u1ZjrERXd1dseHlDRES07tH6gZc67W8dbO/nKefwhlrZ+7/2runyq+m2m2/b5rHDjzg8g5FsrZxtqVi9Dx2hORS39aupVi1+oHwlB9QUdtxSfylmKdUxDh40uKKx1ePz1Po9Uth2S9Xdve0RN6ltU5VuS1Atedqngcokfx7U4uMYNqzdkOFIttZo53Ss1efpb7kmzADlcI5TaC7KIAAAJEVABQAgKcm3+OuhrzZ0CpflbGS1XqcOIciOdU81ae1Dc1JBBQAgKQIqAABJSb7Fv2jBokzet7gFrd1PLWW1jVeTtj7bSysfKKaCCgBAUgRUAACSknyLH4DGpbUPjaHa+7IKKgAASUm+gjr1S1N77y9evLj3fj0nK+VhYpSJXFvLU1WmeBvva8JU64dbe++ndLnfCJOjqEye9k8gGyqoAAAkRUAFACApybf4ixW3hVJrdaakp+06eeLkbAdCVaR2ntSVT6zMegjkhFY+zaAah9Xl9XCpWu7jKqgAACRFQAUAICm5avEX66sc3myz1wdqCax8YmW0tLTUaTRpWLZsWdZD2C4DzejPSjNuS1ROa39rxeuj+Gw0edLf5b+pDut3WyqoAAAkJbcV1J6/SPP61+j2qOSvq+KJLX1Vmuv9F1uzVbsr0VNNLa6kFleIp0yZUtP391c8A1EpBWoltwEVAKgPf7Dmx0AFoLz8LLX4AQBIyqCI6M56ENWSt3Oj1uOvmJ6JLR0dHTV/r6w0U5uxv4lTtWj3/+f22QzbEv1rpv2sHprx8LQU1OJ7LKvzoFb7ULlKxuA8qAAANA0BFQCApOR+klR/M5yL1Xq2c3/yciAy+VHN86TaPumLVj6Upp5no0ntvVo/3FrzcaigAgCQFAEVAICk5L7FX9zyHEjxrMlqlsu1SutPG7Lvdv9Ow3bqfay/bdz2Sg/7EbXmEp6Va/YL2qigAgCQlNxXUCvlLzkaSV+dBOdZpIdKaXqKfyaNvK/6Xdu/1NbNQBXbekyMKqaCCgBAUgRUAACS0rQtfvJHm7I8zdJCxL4BNB4VVAAAkiKgAgCQFC1+aALa/Y1DOx/S0DMLv5zzlaY2c38g9Z65X0wFFQCApAioAAAkRYuf5GlpVlfP+tTqzw/7AAMZqM2ct9YyqKACAJCUpqqgmigC77M/pEellHKUMzmn+LXNVk3t+exd3V2xvmN9JmPI0zrPcmJUMRVUAACSIqACAJCUQRHRnfUgstbI7c2WlpaIiOjo6Mh4JOXR6sxOf/tDXrelPLC906Ov/a+cVn6l8tSCLkVf66y4xT+6ZXQMHvReja6vz17tdV7J+q3Hz71YzxhT+T5SQQUAICkCKgAASWmqWfzAwJwntT5SaaORluLtYsPaDXV7377ayY3W9u9PvVvpqUrtO0kFFQCApAioAAAkxSz+Io3Y0uyZed3W1hadnZ0Rke7nTK29wPsWL15sFn8V2MapRD1b/RH5be0P1KrvbxZ/NfW37nrGVsq6rechB6mclL8vKqgAACTFJKkGduKJJ0Z7e/s2j7vEJeXqb1tiYKqm5EFeq6YR2U1y6llnpbx/ntdvVlRQAQBIigoqAJSpUCjU7b26urvq9l55VOr6KX5dNddptX8+9fx513M77tEzH2YgJkn1oxFa38Vt2eJJUn0p5/NqWTafQqHQ57bUCPtJrdhPGtuKFSuyHgLk0mGHHVbS67T4AQBIihY/AJSpra0tk/f932f/t+bvsfKJlTVZ7uSJk0te/va+tuexYl3dXbHh5fdO2dW6R+sHnmaqVuugHH19hmrY+7/2rslyq01A7UcjtOfKObakET4v9dcI243DFKhEqcfRVdvoj4zuvV+r86PW6rKnPYGw2rPuK1nu4EGDa3Ie1IFkdcaBlM932h8tfgAAkqKCCjS1RqgCAzQaARUAcmjZsmW996dMmVLT9ypuTdfjpPPlnAS/Fqr9ebP6HMXbSN5o8QMAkBQVVACgZJVOoiqnKppVxbFHni9NWjz2qV+amt1AtpMKKgAASRFQAQBIihY/AORQX+3bWp0bdSDlTCrq7/ms2/rFKp0klcJnaJQzk6igAgCQlNwF1B133DGefPLJ6O7ujm984xtbPTd37tzo7u7u9/axj30so1FDbU2dOjXuvvvueOGFF6KzszPWrl0bCxYsiAMPPDDroQFA2XLX4v/6178eH/nIR/p8buHChfHcc89t8/gVV1wRw4cPj0ceeaTWw4NMTJo0KTZt2hTz5s2LjRs3Rmtra8ycOTOWL18eRx11VDz++ONZDxGog+JLWuah3Z8HA32eFNr6ebyU6UByFVD32GOP+Pa3vx1XXnllXHbZZds8v2rVqli1atVWj40dOzbGjh0b119/fWzZsqVeQ4W66mt/uP7662PdunVx5plnxplnnpnBqACgMmW1+D/xiU9Ed3d3TJ06dZvnTjnllOju7q7p1Sy+973vxTPPPBM333xzyf/nlFNOicGDB8ctt9xSs3HBQAqFQjz11FPx1FNPRaFQ6H189913j/Xr18eDDz4YgwdX94ibjo6OePPNN2O33Xar6nKBfGj9cGvvbdmyZb23evropI/23ip5PjXF48163MU/30ZUVgX13nvvjeeffz6mTZsWixYt2uq5adOmxXPPPRfLli2LnXbaKXbZZZeSlvnKK6+U9LrDDz88pk+fHm1tbdHd3V3ymKdNmxbPP/983H///SX/H6i2zs7OmD59ejz44INx+eWX9x4//ZOf/CRGjBgRM2bMiK6uru3ed0aMGBFDhgyJ1tbWOP
fcc2PEiBGxdOnSqn4WAKi1slv8N998c5x33nmx6667xr/+9a+IiBg1alR85jOficsvvzwi3qta3nTTTSUtb9CgQSW97kc/+lHcdtttsWzZshg3blxJ/+eAAw6Igw8+OK688sqSXg+1tHz58vj+978f3/rWt+I3v/lN7LnnnnHKKafEOeecE3//+98jYvv3nWXLlsX+++8fERGvv/56XHbZZXHDDTdU7TMAQD2UHVDnz58fF110UZx00knxi1/8IiIiTj755BgyZEhv6/33v/99fOpTn6raIGfMmBGTJk2Kk046qaz/N23atIgI7X2Scemll8bnP//5+OUvfxnDhw+Pe++9N374wx/2Pr+9+85pp50Wu+66a+y9995x2mmnxdChQ2OHHXaId955pxrDB3Kqr3OmLlqwqPd+LQ/P+yDFk47y0ubPQvGhGXm+fGk5yg6ozzzzTCxfvjymTZvWG1CnTZsWDz30UPzjH/+IiIgNGzbEhg3lzR4cNmxYDB8+vPff7777bmzcuDF22WWX+O53vxtXXXVVrFu3rqxlfvnLX+5z4hRkZcuWLTFz5sxYsWJFvPXWW3Haaadt9Xwl+06x4i+xX//61/HUU09FRMT5559f8TIBoN4qmsU/f/78mDdvXowZMyZ23nnnOOqoo+Kss87qfb5QKMSIESNKWtZLL70UERGzZ8+OSy+9tPfxNWvWxIQJE2L27Nmx0047xW233dbb2h87dmxEvDfBZNy4cbF+/fptZuh//OMfj/Hjx8cFF1xQyUdsCJ2dndHW1tZ7nzQce+yxERExdOjQ2HfffWPNmjW9z1Wy7/Tn1VdfjT/+8Y8xbdq07Q6otiUA6mlQRJQ+4+j/jRw5MtavXx8XX3xxDB06NC655JIYPXp076SN6dOnl30c3YQJE2Lvvffuffytt96KP//5z3HjjTfGjBkzPnAZkydPjscee2yrx37605/G1772tRg/fnysXbu29A8HNTRp0qR45JFH4pZbbonJkyfHqFGjYtKkSb3Hc1ey73yQhQsXxrHHHhvDhg3bnmEDDS6Fdn+tdXV3xfqO9RERMbpldAwelPa1ihp1dn6pKgqoERGLFi2K8ePHR6FQiGeeeSa+8IUv9D7X2tpa8hVsBpphfMghh2xzYv6Wlpb4+c9/HjfeeGPceeed8ac//an3F3zEe1ebevHFF+Nvf/tbHHPMMWV8KqidHXfcMR5++OHYfffd46CDDooJEyb0htXTTz89Iirfd/bYY494+eWXt3p+3Lhx8fjjj8fKlSvtB8AHElDT0+wBteIT9c+fPz/uuOOOiIiYM2fOVs9t73F0xR599NF49NFHt3qsp9X/5JNPxp133rnN/zn22GNj1KhRJkeRlEsuuSQmT54c//3f/x2bN2+OVatWxXe+8524/PLL4/bbb48lS5ZUvO+sWrUqli5dGitXroxNmzbFvvvuG6effnoMGTKkqQ9zASCfKq6gDhkyJDZs2BCDBw+O1tbWePvtt6s8tP6NGzcu1qxZE7Nnz44f/OAH2zx/6623xhe/+MVobW2NTZs21W1c0J9DDjkkHn744bjmmmvinHPO6X188ODB8dBDD8WYMWPiwAMPjNdee62i5c+dOzc+97nPxT777BO77LJLdHR0xP333x9XXHFFPPHEE9X6GEATyepSqdXWc6aAyRMn56KC2uyV0x4VB9Qddtgh1q9fH7/97W/jjDPOqPKwAIAsCajZEFDfU3GLf+rUqdHS0hLz58+v5ngAgAQUB6U8h9We86t2dXdlPJKt1fuys3lTdkA94ogj4qCDDoo5c+bEX//6V5cQBQCgqsqub5955plxzTXXREdHR3z1q1+txZgAAGhiFR+DCgA0n7ydkiqFY1B72vnNcpnSakjzCGEAAJqWgAoAQFIqnsUPADSfvtrUeWv7V1PxbHwt/OpRQQUAICkCKgAkaP/9948lS5bE66+/Hq+88krMnz8/Ro0alfWwoC60+AEgMWPGjIn7778/Xnvttbjoooti+PDhMXv27Jg0aVIcccQRsWXLlqyHuJVSWtt5Ptl/j75Orq+tXxsCKgAk5qKLLophw4bFoYceGmvXro2IiOXLl8c999wTM2bMiOuuuy7jEUJtOQ8qAFRg3LhxsWbNmn6fHzRoUMXL3rBhQ9x3331x8sknb/X4008/HWvXro1Pf/rTFS87a8UTqio10ESsviYu3Xn7nTFm/JiIiHhhzQvR3V1+/FEtrR8VVACowMsvvxynnnrqVo8NGTIkrr766vj3v/8dERFDhw6ND33oQwMu6913341XX301IiJGjx4de+65Z6xYsWKb1y1fvjyOP/747R88JE5ABYAKvPnmm3HLLbds9diPf/zjGD58eG+F85vf/GZceumlAy5rzZo1MWHChIiI2GuvvSIi4sUXX9zmdS+++GKMHDkydtppp94QDI1IQAWAKvjKV74SZ511Vpx33nlx7733RkTE/Pnzo729fcD/+9Zbb/XeHzp0aEREvP3229u8rrOzs/c1eQ2o1WiTV3KYwBdO+kIUCoWIeH89ki4BFQC208EHHxw/+9nP4tZbb42rr7669/HVq1fH6tWry1pWT1jdeeedt3muJ2AVB1pKJ5jmh4AKANtht912izvuuCOeffbZOOOMM7Z6btiwYTF8+PABl/Huu+/Gxo0bI+L91n5Pq7/YXnvtFa+88kpuq6dQjm43Nzc3Nze38m+DBg3q/t3vfte9cePG7vHjx2/z/Ny5c7tLsXr16q3+30svvdR92223bbO8p59+uvuee+7J/HO7udX6poIKABWaO3duHHvssfHZz362z1NOVXIMakTEHXfcEdOnT4+xY8fGunXrIiLik5/8ZOy3335bHUIAjcp5UAGgAhMnTozHHnss7r///rj++uu3ef4/Z/iXY+zYsfHoo4/Gq6++GvPmzYvhw4fH+eefH+vWrYvDDz9ci5+mkHkZ183Nzc3NLW+3Y4455gPb9tu7/AMOOKD77rvv7t68eXP3P//5z+5f/epX3S0tLZl/bje3etxUUAEASMrgrAcAAADFBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFL+D9p3kCKPFOwwAAAAAElFTkSuQmCC",
730
+ "text/plain": [
731
+ "<Figure size 660x350 with 4 Axes>"
732
+ ]
733
+ },
734
+ "metadata": {},
735
+ "output_type": "display_data"
736
+ }
737
+ ],
738
+ "source": [
739
+ "roi = nib.load(nsdgeneral_path)\n",
740
+ "print(roi.shape)\n",
741
+ "plot_roi(roi, bg_img=avg_mask)\n",
742
+ "plt.show()"
743
+ ]
744
+ },
745
+ {
746
+ "cell_type": "code",
747
+ "execution_count": 17,
748
+ "id": "d906312b-ea5d-418d-8326-e8b395c9a9c2",
749
+ "metadata": {},
750
+ "outputs": [
751
+ {
752
+ "name": "stdout",
753
+ "output_type": "stream",
754
+ "text": [
755
+ "total voxels (whole brain) = 183408\n",
756
+ "nsdgeneral voxels = 19577\n"
757
+ ]
758
+ }
759
+ ],
760
+ "source": [
761
+ "avg_mask = avg_mask.get_fdata().flatten()\n",
762
+ "print(f\"total voxels (whole brain) = {int(avg_mask.sum())}\")\n",
763
+ "\n",
764
+ "roi = roi.get_fdata()\n",
765
+ "roi = roi.flatten()\n",
766
+ "roi = roi[avg_mask.astype(bool)]\n",
767
+ "roi[np.isnan(roi)] = 0\n",
768
+ "roi = roi.astype(bool)\n",
769
+ "print(f\"nsdgeneral voxels = {roi.sum()}\")"
770
+ ]
771
+ },
772
+ {
773
+ "cell_type": "code",
774
+ "execution_count": 18,
775
+ "id": "ce12274a-3b35-444d-92b0-7cfd0949badc",
776
+ "metadata": {},
777
+ "outputs": [
778
+ {
779
+ "name": "stdout",
780
+ "output_type": "stream",
781
+ "text": [
782
+ "vox before ROI exclusion: (693, 183408)\n",
783
+ "vox after ROI exclusion: (693, 19577)\n"
784
+ ]
785
+ }
786
+ ],
787
+ "source": [
788
+ "# ROI masking?\n",
789
+ "print(f\"vox before ROI exclusion: {vox.shape}\")\n",
790
+ "vox = vox[:,roi]\n",
791
+ "print(f\"vox after ROI exclusion: {vox.shape}\")\n",
792
+ "\n",
793
+ "if np.any(np.isnan(vox)):\n",
794
+ " print(\"NaNs found! Removing voxels...\")\n",
795
+ " x,y = np.where(np.isnan(vox))\n",
796
+ " vox = vox[:,np.setdiff1d(np.arange(vox.shape[-1]), y)]"
797
+ ]
798
+ },
799
+ {
800
+ "cell_type": "code",
801
+ "execution_count": 18,
802
+ "id": "26802a5b-7bc8-4d47-b8e0-1dfa557fc6ad",
803
+ "metadata": {},
804
+ "outputs": [],
805
+ "source": [
806
+ "pairs_homog = np.array([[p[0], p[1]] for p in pairs])"
807
+ ]
808
+ },
809
+ {
810
+ "cell_type": "code",
811
+ "execution_count": 19,
812
+ "id": "50d52f93-af1d-448d-92e4-5af8096aaaf2",
813
+ "metadata": {},
814
+ "outputs": [
815
+ {
816
+ "name": "stderr",
817
+ "output_type": "stream",
818
+ "text": [
819
+ "100%|██████████| 19302/19302 [00:01<00:00, 17349.27it/s]"
820
+ ]
821
+ },
822
+ {
823
+ "name": "stdout",
824
+ "output_type": "stream",
825
+ "text": [
826
+ "rels (19302,)\n"
827
+ ]
828
+ },
829
+ {
830
+ "name": "stderr",
831
+ "output_type": "stream",
832
+ "text": [
833
+ "\n"
834
+ ]
835
+ }
836
+ ],
837
+ "source": [
838
+ "vox_pairs = utils.zscore(vox[pairs_homog])\n",
839
+ "rels = np.full(vox.shape[-1],np.nan)\n",
840
+ "for v in tqdm(range(vox.shape[-1])):\n",
841
+ " rels[v] = np.corrcoef(vox_pairs[:,0,v], vox_pairs[:,1,v])[1,0]\n",
842
+ "print(\"rels\", rels.shape)\n",
843
+ "assert np.sum(np.all(np.isnan(rels))) == 0"
844
+ ]
845
+ },
846
+ {
847
+ "cell_type": "code",
848
+ "execution_count": 20,
849
+ "id": "84be077b-fbef-4b23-895c-4928228229d2",
850
+ "metadata": {},
851
+ "outputs": [
852
+ {
853
+ "name": "stdout",
854
+ "output_type": "stream",
855
+ "text": [
856
+ "(162, 19302, 2)\n"
857
+ ]
858
+ },
859
+ {
860
+ "name": "stderr",
861
+ "output_type": "stream",
862
+ "text": [
863
+ "100%|██████████| 162/162 [00:00<00:00, 3290.51it/s]\n"
864
+ ]
865
+ }
866
+ ],
867
+ "source": [
868
+ "# creating img x vox x repetitions matrix | shape=(150, 18419, 2)\n",
869
+ "vox0 = np.zeros((len(pairs_homog), vox.shape[-1], 2))\n",
870
+ "print(vox0.shape)\n",
871
+ "for ipair, pair in enumerate(tqdm(pairs_homog)):\n",
872
+ " pair = pair[:2] # to keep things consistent, just using the first two repeats\n",
873
+ " i,j = pair\n",
874
+ " vox0[ipair, :, :] = vox[pair].T\n",
875
+ "vox_avg = vox0.mean(-1) # average across the repetitions"
876
+ ]
877
+ },
878
+ {
879
+ "cell_type": "code",
880
+ "execution_count": 21,
881
+ "id": "6206a31e-3d0a-4a30-ada2-4cffa1009856",
882
+ "metadata": {},
883
+ "outputs": [
884
+ {
885
+ "name": "stdout",
886
+ "output_type": "stream",
887
+ "text": [
888
+ "\n",
889
+ "vox before reliability thresholding: (1386, 19302)\n",
890
+ "\n",
891
+ "vox after reliability thresholding: (1386, 1053)\n"
892
+ ]
893
+ }
894
+ ],
895
+ "source": [
896
+ "# Reliability thresholding?\n",
897
+ "print(f\"\\nvox before reliability thresholding: {vox.shape}\")\n",
898
+ "vox = vox[:,rels>.2]\n",
899
+ "print(f\"\\nvox after reliability thresholding: {vox.shape}\")"
900
+ ]
901
+ },
902
+ {
903
+ "cell_type": "code",
904
+ "execution_count": 22,
905
+ "id": "e6f632cc-2b26-4dc8-a4d5-12b641765601",
906
+ "metadata": {},
907
+ "outputs": [
908
+ {
909
+ "name": "stdout",
910
+ "output_type": "stream",
911
+ "text": [
912
+ "torch.Size([1386, 3, 224, 224])\n",
913
+ "(1386, 1053)\n"
914
+ ]
915
+ }
916
+ ],
917
+ "source": [
918
+ "print(images.shape)\n",
919
+ "print(vox.shape)"
920
+ ]
921
+ },
922
+ {
923
+ "cell_type": "code",
924
+ "execution_count": 23,
925
+ "id": "735dfc27-a9bd-4a22-ac3f-a1f44515293e",
926
+ "metadata": {},
927
+ "outputs": [
928
+ {
929
+ "name": "stdout",
930
+ "output_type": "stream",
931
+ "text": [
932
+ "1138 248\n"
933
+ ]
934
+ }
935
+ ],
936
+ "source": [
937
+ "utils.seed_everything(seed)\n",
938
+ "\n",
939
+ "# add_repeats = 48\n",
940
+ "# imageTrain = np.arange(len(images))\n",
941
+ "# train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])\n",
942
+ "# train_image_indices = np.sort(np.append(train_image_indices, np.array(pairs[:add_repeats].flatten())))\n",
943
+ "\n",
944
+ "# # check that there's no repeat indices in training data\n",
945
+ "# assert len(sorted(np.append(np.array([item for item in imageTrain if item not in pairs.flatten()]), np.array(pairs[:add_repeats].flatten())))) == len(set(sorted(np.append(np.array([item for item in imageTrain if item not in pairs.flatten()]), np.array(pairs[:add_repeats].flatten())))))\n",
946
+ "\n",
947
+ "# test_image_indices = pairs[add_repeats:]\n",
948
+ "# print(len(train_image_indices), len(test_image_indices))\n",
949
+ "\n",
950
+ "if train_test_split == 'orig':\n",
951
+ " # train = all images except images that were repeated\n",
952
+ " # test = average of the same-image presentations\n",
953
+ " imageTrain = np.arange(len(images))\n",
954
+ " train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])\n",
955
+ " test_image_indices = pairs\n",
956
+ " print(len(train_image_indices), len(test_image_indices))\n",
957
+ "elif train_test_split == 'MST':\n",
958
+ " # non-MST images are the train split\n",
959
+ " # MST images are the test split\n",
960
+ " train_image_indices = np.where(MST_images==False)[0]\n",
961
+ " test_image_indices = np.where(MST_images==True)[0]\n",
962
+ " print(len(train_image_indices), len(test_image_indices))\n",
963
+ " # for i in test_image_indices:\n",
964
+ " # assert i in pairs # all MST images have pairs"
965
+ ]
966
+ },
967
+ {
968
+ "cell_type": "code",
969
+ "execution_count": 24,
970
+ "id": "a292cfad-83f4-4bf8-994e-da2c871c0a6c",
971
+ "metadata": {},
972
+ "outputs": [],
973
+ "source": [
974
+ "# test_image_indices"
975
+ ]
976
+ },
977
+ {
978
+ "cell_type": "code",
979
+ "execution_count": 25,
980
+ "id": "b81220cd-c11d-4a2a-8755-53b70d90cfe7",
981
+ "metadata": {},
982
+ "outputs": [],
983
+ "source": [
984
+ "# repeats_in_test = []\n",
985
+ "# for p in pairs:\n",
986
+ "# group = []\n",
987
+ "# for item in p:\n",
988
+ "# curr = np.where(test_image_indices == item)\n",
989
+ "# if curr[0].size > 0:\n",
990
+ "# group.append(curr[0][0])\n",
991
+ "# # print(np.array(group))\n",
992
+ "# if len(group) > 0:\n",
993
+ "# repeats_in_test.append(np.array(group))\n",
994
+ "# # if p[0] in test_image_indices:\n",
995
+ "# # repeats_in_test.append(p)\n",
996
+ " \n",
997
+ "# repeats_in_test = np.array(repeats_in_test)\n"
998
+ ]
999
+ },
1000
+ {
1001
+ "cell_type": "code",
1002
+ "execution_count": 26,
1003
+ "id": "5528d877-b662-41f7-8982-3f31051871f6",
1004
+ "metadata": {},
1005
+ "outputs": [
1006
+ {
1007
+ "name": "stdout",
1008
+ "output_type": "stream",
1009
+ "text": [
1010
+ "voxels have been zscored\n",
1011
+ "-0.0318167 1.0120775\n",
1012
+ "vox (1386, 1053)\n"
1013
+ ]
1014
+ }
1015
+ ],
1016
+ "source": [
1017
+ "train_mean = np.mean(vox[train_image_indices],axis=0)\n",
1018
+ "train_std = np.std(vox[train_image_indices],axis=0)\n",
1019
+ "\n",
1020
+ "vox = utils.zscore(vox,train_mean=train_mean,train_std=train_std)\n",
1021
+ "print(\"voxels have been zscored\")\n",
1022
+ "print(vox[:,0].mean(), vox[:,0].std())\n",
1023
+ "print(\"vox\", vox.shape)\n",
1024
+ "\n",
1025
+ "images = torch.Tensor(images)\n",
1026
+ "vox = torch.Tensor(vox)"
1027
+ ]
1028
+ },
1029
+ {
1030
+ "cell_type": "code",
1031
+ "execution_count": 27,
1032
+ "id": "1eb5d464-7ffa-419a-a6b4-d0108f8e196a",
1033
+ "metadata": {},
1034
+ "outputs": [],
1035
+ "source": [
1036
+ "test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))"
1037
+ ]
1038
+ },
1039
+ {
1040
+ "cell_type": "markdown",
1041
+ "id": "d8a3901c-60dd-4ae2-b0f5-8a55aa231908",
1042
+ "metadata": {},
1043
+ "source": [
1044
+ "# Model"
1045
+ ]
1046
+ },
1047
+ {
1048
+ "cell_type": "code",
1049
+ "execution_count": 28,
1050
+ "id": "64672583-9f00-46f5-8d4e-00e4c7068a1d",
1051
+ "metadata": {
1052
+ "tags": []
1053
+ },
1054
+ "outputs": [
1055
+ {
1056
+ "name": "stdout",
1057
+ "output_type": "stream",
1058
+ "text": [
1059
+ "Loaded test dl for subj1!\n",
1060
+ "\n"
1061
+ ]
1062
+ }
1063
+ ],
1064
+ "source": [
1065
+ "subj_list = [subj]\n",
1066
+ "subj = subj_list[0]\n",
1067
+ "test_dl = torch.utils.data.DataLoader(test_data, batch_size=len(test_data), shuffle=False, drop_last=True, pin_memory=True)\n",
1068
+ "print(f\"Loaded test dl for subj{subj}!\\n\")"
1069
+ ]
1070
+ },
1071
+ {
1072
+ "cell_type": "code",
1073
+ "execution_count": 29,
1074
+ "id": "a3cbeea8-e95b-48d9-9bc2-91af260c93d1",
1075
+ "metadata": {},
1076
+ "outputs": [
1077
+ {
1078
+ "name": "stdout",
1079
+ "output_type": "stream",
1080
+ "text": [
1081
+ "0 248 248\n"
1082
+ ]
1083
+ }
1084
+ ],
1085
+ "source": [
1086
+ "test_voxels, test_images = None, None\n",
1087
+ "for test_i, behav in enumerate(test_dl):\n",
1088
+ " behav = behav[0]\n",
1089
+ "\n",
1090
+ " if behav.ndim>1:\n",
1091
+ " test_image = images[behav[:,0].long().cpu()].to(device)\n",
1092
+ " test_vox = vox[behav.long().cpu()].mean(1)\n",
1093
+ " else:\n",
1094
+ " test_image = images[behav.long().cpu()].to(device)\n",
1095
+ " test_vox = vox[behav.long().cpu()]\n",
1096
+ " \n",
1097
+ " if test_voxels is None:\n",
1098
+ " test_voxels = test_vox\n",
1099
+ " test_images = test_image\n",
1100
+ " else:\n",
1101
+ " test_voxels = torch.vstack((test_voxels, test_vox))\n",
1102
+ " test_images = torch.vstack((test_images, test_image))\n",
1103
+ "\n",
1104
+ "print(test_i, len(test_voxels), len(test_images))"
1105
+ ]
1106
+ },
1107
+ {
1108
+ "cell_type": "code",
1109
+ "execution_count": 30,
1110
+ "id": "a3ae7a06-7135-4073-b315-59579e35e2a1",
1111
+ "metadata": {},
1112
+ "outputs": [],
1113
+ "source": [
1114
+ "num_voxels_list = []\n",
1115
+ "num_voxels_list.append(test_voxels.shape[-1])"
1116
+ ]
1117
+ },
1118
+ {
1119
+ "cell_type": "code",
1120
+ "execution_count": 31,
1121
+ "id": "de0400d4-cbd6-4941-a0b2-1a4bc2ae97da",
1122
+ "metadata": {
1123
+ "tags": []
1124
+ },
1125
+ "outputs": [],
1126
+ "source": [
1127
+ "## USING OpenCLIP ViT-bigG ###\n",
1128
+ "sys.path.append('generative_models/')\n",
1129
+ "import sgm\n",
1130
+ "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder\n",
1131
+ "\n",
1132
+ "try:\n",
1133
+ " print(clip_img_embedder)\n",
1134
+ "except:\n",
1135
+ " clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n",
1136
+ " arch=\"ViT-bigG-14\",\n",
1137
+ " version=\"laion2b_s39b_b160k\",\n",
1138
+ " output_tokens=True,\n",
1139
+ " only_tokens=True,\n",
1140
+ " )\n",
1141
+ " clip_img_embedder.to(device)\n",
1142
+ "clip_seq_dim = 256\n",
1143
+ "clip_emb_dim = 1664"
1144
+ ]
1145
+ },
1146
+ {
1147
+ "cell_type": "code",
1148
+ "execution_count": 32,
1149
+ "id": "56b606a4-7302-4ac5-b89d-bbe4fcb00d11",
1150
+ "metadata": {},
1151
+ "outputs": [],
1152
+ "source": [
1153
+ "import utils"
1154
+ ]
1155
+ },
1156
+ {
1157
+ "cell_type": "code",
1158
+ "execution_count": 33,
1159
+ "id": "e452b5b2-47d9-4271-b9fc-ea331fbac1bc",
1160
+ "metadata": {},
1161
+ "outputs": [
1162
+ {
1163
+ "name": "stdout",
1164
+ "output_type": "stream",
1165
+ "text": [
1166
+ "MindEyeModule()\n",
1167
+ "param counts:\n",
1168
+ "1,079,296 total\n",
1169
+ "1,079,296 trainable\n",
1170
+ "param counts:\n",
1171
+ "1,079,296 total\n",
1172
+ "1,079,296 trainable\n",
1173
+ "param counts:\n",
1174
+ "453,360,280 total\n",
1175
+ "453,360,280 trainable\n",
1176
+ "param counts:\n",
1177
+ "454,439,576 total\n",
1178
+ "454,439,576 trainable\n",
1179
+ "param counts:\n",
1180
+ "259,865,216 total\n",
1181
+ "259,865,200 trainable\n",
1182
+ "param counts:\n",
1183
+ "714,304,792 total\n",
1184
+ "714,304,776 trainable\n"
1185
+ ]
1186
+ }
1187
+ ],
1188
+ "source": [
1189
+ "model = utils.prepare_model_and_training(\n",
1190
+ " num_voxels_list=num_voxels_list,\n",
1191
+ " n_blocks=n_blocks,\n",
1192
+ " hidden_dim=hidden_dim,\n",
1193
+ " clip_emb_dim=clip_emb_dim,\n",
1194
+ " clip_seq_dim=clip_seq_dim,\n",
1195
+ " use_prior=use_prior,\n",
1196
+ " clip_scale=clip_scale\n",
1197
+ ")"
1198
+ ]
1199
+ },
1200
+ {
1201
+ "cell_type": "code",
1202
+ "execution_count": null,
1203
+ "id": "f726f617-39f5-49e2-8d0c-d11d27d01c30",
1204
+ "metadata": {},
1205
+ "outputs": [
1206
+ {
1207
+ "name": "stderr",
1208
+ "output_type": "stream",
1209
+ "text": [
1210
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n",
1211
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n",
1212
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n",
1213
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n",
1214
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n",
1215
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n",
1216
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n",
1217
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n",
1218
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n",
1219
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n",
1220
+ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n"
1221
+ ]
1222
+ }
1223
+ ],
1224
+ "source": [
1225
+ "# prep unCLIP\n",
1226
+ "config = OmegaConf.load(\"/scratch/gpfs/ri4541/MindEyeV2/src/generative_models/configs/unclip6.yaml\")\n",
1227
+ "config = OmegaConf.to_container(config, resolve=True)\n",
1228
+ "unclip_params = config[\"model\"][\"params\"]\n",
1229
+ "network_config = unclip_params[\"network_config\"]\n",
1230
+ "denoiser_config = unclip_params[\"denoiser_config\"]\n",
1231
+ "first_stage_config = unclip_params[\"first_stage_config\"]\n",
1232
+ "conditioner_config = unclip_params[\"conditioner_config\"]\n",
1233
+ "sampler_config = unclip_params[\"sampler_config\"]\n",
1234
+ "scale_factor = unclip_params[\"scale_factor\"]\n",
1235
+ "disable_first_stage_autocast = unclip_params[\"disable_first_stage_autocast\"]\n",
1236
+ "offset_noise_level = unclip_params[\"loss_fn_config\"][\"params\"][\"offset_noise_level\"]\n",
1237
+ "\n",
1238
+ "first_stage_config['target'] = 'sgm.models.autoencoder.AutoencoderKL'\n",
1239
+ "sampler_config['params']['num_steps'] = 38\n",
1240
+ "\n",
1241
+ "diffusion_engine = DiffusionEngine(network_config=network_config,\n",
1242
+ " denoiser_config=denoiser_config,\n",
1243
+ " first_stage_config=first_stage_config,\n",
1244
+ " conditioner_config=conditioner_config,\n",
1245
+ " sampler_config=sampler_config,\n",
1246
+ " scale_factor=scale_factor,\n",
1247
+ " disable_first_stage_autocast=disable_first_stage_autocast)\n",
1248
+ "# set to inference\n",
1249
+ "diffusion_engine.eval().requires_grad_(False)\n",
1250
+ "diffusion_engine.to(device)\n",
1251
+ "\n",
1252
+ "ckpt_path = '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/unclip6_epoch0_step110000.ckpt' \n",
1253
+ "ckpt = torch.load(ckpt_path, map_location='cpu')\n",
1254
+ "diffusion_engine.load_state_dict(ckpt['state_dict'])\n",
1255
+ "\n",
1256
+ "batch={\"jpg\": torch.randn(1,3,1,1).to(device), # jpg doesnt get used, it's just a placeholder\n",
1257
+ " \"original_size_as_tuple\": torch.ones(1, 2).to(device) * 768,\n",
1258
+ " \"crop_coords_top_left\": torch.zeros(1, 2).to(device)}\n",
1259
+ "out = diffusion_engine.conditioner(batch)\n",
1260
+ "vector_suffix = out[\"vector\"].to(device)\n",
1261
+ "print(\"vector_suffix\", vector_suffix.shape)"
1262
+ ]
1263
+ },
1264
+ {
1265
+ "cell_type": "code",
1266
+ "execution_count": null,
1267
+ "id": "68abd440-7e6b-4023-9dc8-05b1b5c0baa9",
1268
+ "metadata": {},
1269
+ "outputs": [],
1270
+ "source": [
1271
+ "# setup text caption networks\n",
1272
+ "from transformers import AutoProcessor, AutoModelForCausalLM\n",
1273
+ "from modeling_git import GitForCausalLMClipEmb\n",
1274
+ "# processor = AutoProcessor.from_pretrained(\"microsoft/git-large-coco\")\n",
1275
+ "# clip_text_model = GitForCausalLMClipEmb.from_pretrained(\"microsoft/git-large-coco\")\n",
1276
+ "processor = AutoProcessor.from_pretrained(\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2\")\n",
1277
+ "clip_text_model = GitForCausalLMClipEmb.from_pretrained(\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2\")\n",
1278
+ "\n",
1279
+ "clip_text_model.to(device) # if you get OOM running this script, you can switch this to cpu and lower minibatch_size to 4\n",
1280
+ "clip_text_model.eval().requires_grad_(False)\n",
1281
+ "clip_text_seq_dim = 257\n",
1282
+ "clip_text_emb_dim = 1024\n",
1283
+ "\n",
1284
+ "class CLIPConverter(torch.nn.Module):\n",
1285
+ " def __init__(self):\n",
1286
+ " super(CLIPConverter, self).__init__()\n",
1287
+ " self.linear1 = nn.Linear(clip_seq_dim, clip_text_seq_dim)\n",
1288
+ " self.linear2 = nn.Linear(clip_emb_dim, clip_text_emb_dim)\n",
1289
+ " def forward(self, x):\n",
1290
+ " x = x.permute(0,2,1)\n",
1291
+ " x = self.linear1(x)\n",
1292
+ " x = self.linear2(x.permute(0,2,1))\n",
1293
+ " return x\n",
1294
+ " \n",
1295
+ "clip_convert = CLIPConverter()\n",
1296
+ "state_dict = torch.load(\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/bigG_to_L_epoch8.pth\", map_location='cpu')['model_state_dict']\n",
1297
+ "clip_convert.load_state_dict(state_dict, strict=True)\n",
1298
+ "clip_convert.to(device) # if you get OOM running this script, you can switch this to cpu and lower minibatch_size to 4\n",
1299
+ "del state_dict"
1300
+ ]
1301
+ },
1302
+ {
1303
+ "cell_type": "code",
1304
+ "execution_count": null,
1305
+ "id": "41b4a640",
1306
+ "metadata": {},
1307
+ "outputs": [],
1308
+ "source": [
1309
+ "# Load pretrained model ckpt\n",
1310
+ "tag='last'\n",
1311
+ "outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')\n",
1312
+ "print(f\"\\n---loading {outdir}/{tag}.pth ckpt---\\n\")\n",
1313
+ "checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')\n",
1314
+ "state_dict = checkpoint['model_state_dict']\n",
1315
+ "model.load_state_dict(state_dict, strict=True)\n",
1316
+ "del checkpoint\n",
1317
+ "print(\"ckpt loaded!\")"
1318
+ ]
1319
+ },
1320
+ {
1321
+ "cell_type": "code",
1322
+ "execution_count": 31,
1323
+ "id": "c6a706a3-d151-4643-bb34-7d08aa7361c8",
1324
+ "metadata": {
1325
+ "tags": []
1326
+ },
1327
+ "outputs": [
1328
+ {
1329
+ "name": "stderr",
1330
+ "output_type": "stream",
1331
+ "text": [
1332
+ " 0%| | 0/4 [00:00<?, ?it/s]"
1333
+ ]
1334
+ },
1335
+ {
1336
+ "data": {
1337
+ "application/vnd.jupyter.widget-view+json": {
1338
+ "model_id": "1899d669587f464ba356a29615d5b8be",
1339
+ "version_major": 2,
1340
+ "version_minor": 0
1341
+ },
1342
+ "text/plain": [
1343
+ "sampling loop time step: 0%| | 0/19 [00:00<?, ?it/s]"
1344
+ ]
1345
+ },
1346
+ "metadata": {},
1347
+ "output_type": "display_data"
1348
+ },
1349
+ {
1350
+ "name": "stdout",
1351
+ "output_type": "stream",
1352
+ "text": [
1353
+ "['a kitchen with a counter and a microwave.', 'a cat is sitting on a table.', 'a large room with a lot of furniture.', 'a giraffe standing next to a tree.', 'a room with a view.', 'a room with a lot of furniture.', 'a large field with a lot of grass.', 'a kitchen with a lot of furniture.', 'a large room with a lot of furniture.', 'a garden with a plant and a fence.', 'a kitchen with a lot of furniture.', 'a kitchen with a counter and a stove', 'a room with a view', 'a plate with a cake on it', 'a snowboarder is skiing down a hill.', 'a clock on a building.', 'a young boy is standing in a pool of water.', 'a bed or beds in a room at the inn', 'a large building with a clock on it.', 'a room with a lot of furniture.', 'a table with a bunch of items on it', 'a tree with a lot of leaves.', 'a night view of a city.', 'a white wall', 'a large building with a clock on it.', 'a large group of people.', 'a small room with a clock and a vase.', 'a large truck is parked next to a building.', 'a room with a view.', 'a table with a lot of items on it', 'a picture of a room with a lot of things in it.', 'a room with a lot of furniture.']\n"
1354
+ ]
1355
+ },
1356
+ {
1357
+ "name": "stderr",
1358
+ "output_type": "stream",
1359
+ "text": [
1360
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.\n",
1361
+ " warnings.warn(\n",
1362
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torch/utils/checkpoint.py:61: UserWarning: None of the inputs have requires_grad=True. Gradients will be None\n",
1363
+ " warnings.warn(\n",
1364
+ " 25%|██▌ | 1/4 [02:21<07:04, 141.65s/it]"
1365
+ ]
1366
+ },
1367
+ {
1368
+ "name": "stdout",
1369
+ "output_type": "stream",
1370
+ "text": [
1371
+ "sub-001_ses-01_bs24_MST_rishab_MSTsplit\n"
1372
+ ]
1373
+ },
1374
+ {
1375
+ "data": {
1376
+ "application/vnd.jupyter.widget-view+json": {
1377
+ "model_id": "a0628499d46941cf9f65527d8eb5d525",
1378
+ "version_major": 2,
1379
+ "version_minor": 0
1380
+ },
1381
+ "text/plain": [
1382
+ "sampling loop time step: 0%| | 0/19 [00:00<?, ?it/s]"
1383
+ ]
1384
+ },
1385
+ "metadata": {},
1386
+ "output_type": "display_data"
1387
+ },
1388
+ {
1389
+ "name": "stdout",
1390
+ "output_type": "stream",
1391
+ "text": [
1392
+ "['a train is driving through a field.', 'a cat sitting on a table.', 'a zebra standing in a field.', 'a plate of food', 'a car driving down a street.', 'a large field with a bunch of people on it', 'a man on a boat in a lake.', 'a view of a table.', 'a large area with a lot of grass.', 'a display of a cell phone.', 'a bunch of different types of flowers', 'a large open area with a lot of space for a small table.', 'a room with a lot of furniture.', 'a large planter with a bunch of flowers on it.', 'a bathroom with a shower and a sink.', 'a large body of water.', 'a street light and a street sign', 'a plate with a piece of food on it', 'a room with a view', 'a bunch of flowers on a table.', 'a small table with a small display.', 'a glass door with a window.', 'a stuffed toy bear is sitting on a table.', 'a small tree in a field.', 'a plate of food with a fork.', 'a snowboarder is on a hill.', 'a room with a lot of furniture.', 'a bathroom with a toilet and a sink.', 'a table with a bunch of food on it', 'a small white and black wall', 'a person sitting down.', 'a clock tower with a clock on it.']\n"
1393
+ ]
1394
+ },
1395
+ {
1396
+ "name": "stderr",
1397
+ "output_type": "stream",
1398
+ "text": [
1399
+ " 50%|█████ | 2/4 [04:42<04:42, 141.19s/it]"
1400
+ ]
1401
+ },
1402
+ {
1403
+ "name": "stdout",
1404
+ "output_type": "stream",
1405
+ "text": [
1406
+ "sub-001_ses-01_bs24_MST_rishab_MSTsplit\n"
1407
+ ]
1408
+ },
1409
+ {
1410
+ "data": {
1411
+ "application/vnd.jupyter.widget-view+json": {
1412
+ "model_id": "518fa75876214f5f857ebdb9c0c9da3c",
1413
+ "version_major": 2,
1414
+ "version_minor": 0
1415
+ },
1416
+ "text/plain": [
1417
+ "sampling loop time step: 0%| | 0/19 [00:00<?, ?it/s]"
1418
+ ]
1419
+ },
1420
+ "metadata": {},
1421
+ "output_type": "display_data"
1422
+ },
1423
+ {
1424
+ "name": "stdout",
1425
+ "output_type": "stream",
1426
+ "text": [
1427
+ "['a man on a surfboard in the water.', 'a woman standing on a sidewalk next to a water.', 'a table with a plate and a plate on it', 'a large truck is parked on the side of the road.', 'a clock tower with a tower in the background.', 'a room with a lot of furniture.', 'a white wall with a window', 'a glass door with a window.', 'a picture of a tree.', 'a view of a large room.', 'a grassy field with a few animals in it.', 'a white wall', 'a plate of food with a fork.', 'a plate with a piece of food on it', 'a clock tower with a tower in the background.', 'a clock tower with a tower in the background.', 'a room with a view.', 'a laptop computer sitting on top of a table.', 'a zebra standing on a dirt field.', 'a surfer riding a wave on a sunny day.', 'a toilet with a lid', 'a room with a table and chairs.', 'a kitchen with a sink and a counter', 'a bathroom with a sink and a mirror.', \"a close up of a person's head\", 'a group of animals standing on a field.', 'a white table with a glass top', 'a white room with a toilet and a sink', 'a plate of food with a fork on it.', 'a picture of a large group of trees.', 'a table with a plate of food on it', 'a kitchen with a counter and a sink.']\n"
1428
+ ]
1429
+ },
1430
+ {
1431
+ "name": "stderr",
1432
+ "output_type": "stream",
1433
+ "text": [
1434
+ " 75%|███████▌ | 3/4 [07:04<02:21, 141.66s/it]"
1435
+ ]
1436
+ },
1437
+ {
1438
+ "name": "stdout",
1439
+ "output_type": "stream",
1440
+ "text": [
1441
+ "sub-001_ses-01_bs24_MST_rishab_MSTsplit\n"
1442
+ ]
1443
+ },
1444
+ {
1445
+ "data": {
1446
+ "application/vnd.jupyter.widget-view+json": {
1447
+ "model_id": "40c339474faf47bc9795754130e0060b",
1448
+ "version_major": 2,
1449
+ "version_minor": 0
1450
+ },
1451
+ "text/plain": [
1452
+ "sampling loop time step: 0%| | 0/19 [00:00<?, ?it/s]"
1453
+ ]
1454
+ },
1455
+ "metadata": {},
1456
+ "output_type": "display_data"
1457
+ },
1458
+ {
1459
+ "name": "stdout",
1460
+ "output_type": "stream",
1461
+ "text": [
1462
+ "['a room with a bed and a television.', 'a large building with a clock on it.', 'a man on a surfboard in the water.', 'a train is driving down the tracks.']\n"
1463
+ ]
1464
+ },
1465
+ {
1466
+ "name": "stderr",
1467
+ "output_type": "stream",
1468
+ "text": [
1469
+ "100%|██████████| 4/4 [07:22<00:00, 110.66s/it]"
1470
+ ]
1471
+ },
1472
+ {
1473
+ "name": "stdout",
1474
+ "output_type": "stream",
1475
+ "text": [
1476
+ "sub-001_ses-01_bs24_MST_rishab_MSTsplit\n"
1477
+ ]
1478
+ },
1479
+ {
1480
+ "name": "stderr",
1481
+ "output_type": "stream",
1482
+ "text": [
1483
+ "\n",
1484
+ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n",
1485
+ " warnings.warn(\n"
1486
+ ]
1487
+ },
1488
+ {
1489
+ "name": "stdout",
1490
+ "output_type": "stream",
1491
+ "text": [
1492
+ "saved sub-001_ses-01_bs24_MST_rishab_MSTsplit outputs!\n"
1493
+ ]
1494
+ }
1495
+ ],
1496
+ "source": [
1497
+ "# get all reconstructions\n",
1498
+ "model.to(device)\n",
1499
+ "model.eval().requires_grad_(False)\n",
1500
+ "\n",
1501
+ "all_blurryrecons = None\n",
1502
+ "all_images = None\n",
1503
+ "all_recons = None\n",
1504
+ "all_predcaptions = []\n",
1505
+ "all_clipvoxels = None\n",
1506
+ "all_prior_out = None\n",
1507
+ "all_backbones = None\n",
1508
+ "\n",
1509
+ "minibatch_size = 32\n",
1510
+ "num_samples_per_image = 1\n",
1511
+ "plotting = True\n",
1512
+ "\n",
1513
+ "with torch.no_grad():\n",
1514
+ " for batch in tqdm(range(0,len(test_images),minibatch_size)):\n",
1515
+ " start_time = time.time() \n",
1516
+ "\n",
1517
+ " image = test_images[batch:batch+minibatch_size]\n",
1518
+ " voxel = test_voxels[batch:batch+minibatch_size].unsqueeze(1).to(device)\n",
1519
+ "\n",
1520
+ " # Save ground truth images\n",
1521
+ " if all_images is None:\n",
1522
+ " all_images = image\n",
1523
+ " else:\n",
1524
+ " all_images = torch.vstack((all_images, image))\n",
1525
+ " \n",
1526
+ " voxel_ridge = model.ridge(voxel,0)\n",
1527
+ " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n",
1528
+ " \n",
1529
+ " # Save retrieval submodule outputs\n",
1530
+ " if clip_scale>0:\n",
1531
+ " if all_clipvoxels is None:\n",
1532
+ " all_clipvoxels = clip_voxels.cpu()\n",
1533
+ " else:\n",
1534
+ " all_clipvoxels = torch.vstack((all_clipvoxels, clip_voxels.cpu()))\n",
1535
+ " \n",
1536
+ " # Feed voxels through OpenCLIP-bigG diffusion prior\n",
1537
+ " prior_out = model.diffusion_prior.p_sample_loop(backbone.shape, \n",
1538
+ " text_cond = dict(text_embed = backbone), \n",
1539
+ " cond_scale = 1., timesteps = 20).cpu()\n",
1540
+ " \n",
1541
+ " if all_prior_out is None:\n",
1542
+ " all_prior_out = prior_out\n",
1543
+ " else:\n",
1544
+ " all_prior_out = torch.vstack((all_prior_out, prior_out))\n",
1545
+ "\n",
1546
+ " pred_caption_emb = clip_convert(prior_out.to(device).float())\n",
1547
+ " generated_ids = clip_text_model.generate(pixel_values=pred_caption_emb, max_length=20)\n",
1548
+ " generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)\n",
1549
+ " all_predcaptions = np.hstack((all_predcaptions, generated_caption))\n",
1550
+ " print(generated_caption)\n",
1551
+ " \n",
1552
+ " # Feed diffusion prior outputs through unCLIP\n",
1553
+ " if plotting:\n",
1554
+ " jj=-1\n",
1555
+ " fig, axes = plt.subplots(1, 12, figsize=(10, 4))\n",
1556
+ "\n",
1557
+ " for i in range(len(voxel)):\n",
1558
+ " samples = utils.unclip_recon(prior_out[[i]],\n",
1559
+ " diffusion_engine,\n",
1560
+ " vector_suffix,\n",
1561
+ " num_samples=num_samples_per_image)\n",
1562
+ " if all_recons is None:\n",
1563
+ " all_recons = samples.cpu()\n",
1564
+ " else:\n",
1565
+ " all_recons = torch.vstack((all_recons, samples.cpu()))\n",
1566
+ " \n",
1567
+ " if plotting: \n",
1568
+ " jj+=1\n",
1569
+ " axes[jj].imshow(utils.torch_to_Image(image[i]))\n",
1570
+ " axes[jj].axis('off')\n",
1571
+ " jj+=1\n",
1572
+ " axes[jj].imshow(utils.torch_to_Image(samples.cpu()[0]))\n",
1573
+ " axes[jj].axis('off')\n",
1574
+ " \n",
1575
+ " plt.show()\n",
1576
+ "\n",
1577
+ " print(model_name)\n",
1578
+ " # err # dont actually want to run the whole thing with plotting=True\n",
1579
+ "\n",
1580
+ "# resize outputs before saving\n",
1581
+ "imsize = 256\n",
1582
+ "all_images = transforms.Resize((imsize,imsize))(all_images).float()\n",
1583
+ "all_recons = transforms.Resize((imsize,imsize))(all_recons).float()\n",
1584
+ "if blurry_recon: \n",
1585
+ " all_blurryrecons = transforms.Resize((imsize,imsize))(all_blurryrecons).float()\n",
1586
+ " \n",
1587
+ "## Saving ##\n",
1588
+ "if not os.path.exists(eval_dir):\n",
1589
+ " os.mkdir(eval_dir)\n",
1590
+ "\n",
1591
+ "if \"MST\" in model_name:\n",
1592
+ " np.save(f\"{eval_dir}/{model_name}_MST_ID.npy\", MST_ID)\n",
1593
+ "torch.save(all_images.cpu(),f\"{eval_dir}/{model_name}_all_images.pt\")\n",
1594
+ "\n",
1595
+ "# repeats_in_test = []\n",
1596
+ "# for p in pairs:\n",
1597
+ "# if p[0] in test_image_indices:\n",
1598
+ "# repeats_in_test.append(p)\n",
1599
+ " \n",
1600
+ "# repeats_in_test = np.array(repeats_in_test)\n",
1601
+ "\n",
1602
+ "# torch.save(test_image_indices, f\"{eval_dir}/{model_name}_test_image_indices.pt\")\n",
1603
+ "# torch.save(repeats_in_test, f\"{eval_dir}/{model_name}_repeats_in_test.pt\")\n",
1604
+ "torch.save(all_recons,f\"{eval_dir}/{model_name}_all_recons.pt\")\n",
1605
+ "if clip_scale>0:\n",
1606
+ " torch.save(all_clipvoxels,f\"{eval_dir}/{model_name}_all_clipvoxels.pt\")\n",
1607
+ "torch.save(all_prior_out,f\"{eval_dir}/{model_name}_all_prior_out.pt\")\n",
1608
+ "torch.save(all_predcaptions,f\"{eval_dir}/{model_name}_all_predcaptions.pt\")\n",
1609
+ "print(f\"saved {model_name} outputs!\")"
1610
+ ]
1611
+ },
1612
+ {
1613
+ "cell_type": "code",
1614
+ "execution_count": null,
1615
+ "id": "73b243d7-6552-4fc8-bef7-d5ad03b17cb2",
1616
+ "metadata": {},
1617
+ "outputs": [],
1618
+ "source": [
1619
+ "if \"MST\" in model_name:\n",
1620
+ " np.save(f\"{eval_dir}/{model_name}_MST_ID.npy\", MST_ID)"
1621
+ ]
1622
+ },
1623
+ {
1624
+ "cell_type": "code",
1625
+ "execution_count": null,
1626
+ "id": "6c6856c3-9205-48f5-bfb2-7e0099f429a4",
1627
+ "metadata": {},
1628
+ "outputs": [],
1629
+ "source": [
1630
+ "all_images.shape"
1631
+ ]
1632
+ },
1633
+ {
1634
+ "cell_type": "code",
1635
+ "execution_count": null,
1636
+ "id": "f9a7162f-ca3b-4b14-9676-3037094994c8",
1637
+ "metadata": {},
1638
+ "outputs": [],
1639
+ "source": [
1640
+ "x = torch.permute(all_images, (0,2,3,1))\n",
1641
+ "y = torch.permute(all_recons, (0,2,3,1))"
1642
+ ]
1643
+ },
1644
+ {
1645
+ "cell_type": "code",
1646
+ "execution_count": null,
1647
+ "id": "7fa41429-ab6a-4aa6-96b9-5c963016b33a",
1648
+ "metadata": {},
1649
+ "outputs": [],
1650
+ "source": [
1651
+ "fig, ax = plt.subplots(5, 2, figsize=(8, 8))\n",
1652
+ "for row, _ in enumerate(ax):\n",
1653
+ " ax[row][0].imshow(x.cpu()[row])\n",
1654
+ " ax[row][1].imshow(y.cpu()[row])\n",
1655
+ "plt.tight_layout()\n",
1656
+ "plt.show()"
1657
+ ]
1658
+ },
1659
+ {
1660
+ "cell_type": "code",
1661
+ "execution_count": null,
1662
+ "id": "d553a7b3-9bdf-44b3-a0bf-398cf5cf402b",
1663
+ "metadata": {},
1664
+ "outputs": [],
1665
+ "source": []
1666
+ }
1667
+ ],
1668
+ "metadata": {
1669
+ "kernelspec": {
1670
+ "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]",
1671
+ "language": "python",
1672
+ "name": "conda_rt_mindeye2"
1673
+ },
1674
+ "language_info": {
1675
+ "codemirror_mode": {
1676
+ "name": "ipython",
1677
+ "version": 3
1678
+ },
1679
+ "file_extension": ".py",
1680
+ "mimetype": "text/x-python",
1681
+ "name": "python",
1682
+ "nbconvert_exporter": "python",
1683
+ "pygments_lexer": "ipython3",
1684
+ "version": "3.11.7"
1685
+ }
1686
+ },
1687
+ "nbformat": 4,
1688
+ "nbformat_minor": 5
1689
+ }
recon_inference-multisession_union_mask.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
recon_inference-multisession_union_mask_sdxl_turbo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
recon_inference.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
rt_glmsingle.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
run_all_batch.slurm ADDED
@@ -0,0 +1,109 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=sub-005_ses-01-02_task-C_finetune_rtpreproc_unionmask
3
+ #SBATCH --ntasks-per-node=1
4
+ #SBATCH --nodes=1
5
+ #SBATCH --gres=gpu:1
6
+ #SBATCH --constraint=gpu80
7
+ #SBATCH --gpus-per-task=1 # Set to equal gres=gpu:#!
8
+ #SBATCH --cpus-per-task=1 # 40 / 80 / 176 distributed across node
9
+ #SBATCH --time=02:35:00 # total run time limit (HH:MM:SS)
10
+ #SBATCH -e slurms/%A_%a.err # first create a "slurms" folder in current directory to store logs
11
+ #SBATCH -o slurms/%A_%a.out
12
+ #SBATCH --no-requeue
13
+ #SBATCH --array=0 # 0 or 0-9
14
+ #SBATCH --mail-type=END
15
+ #SBATCH --mail-user=[email protected]
16
+
17
+ echo "My SLURM_ARRAY_JOB_ID is ${SLURM_ARRAY_JOB_ID}"
18
+ echo "My SLURM_ARRAY_TASK_ID is ${SLURM_ARRAY_TASK_ID}"
19
+ echo "Executing on the machine: $(hostname)"
20
+
21
+ module purge
22
+ module load anaconda3/2023.3
23
+ module load fsl/6.0.6.2
24
+ conda activate rt_mindEye2
25
+ # source /scratch/gpfs/ri4541/MindEyeV2/src/fmri/bin/activate
26
+
27
+ # verify these variables before submitting
28
+ # ---
29
+ sub="sub-005"
30
+ session="all"
31
+ session_label='ses-01-02'
32
+ split=MST # MST train/test split, alternative would be train on non-repeats and test on images that repeat (split=orig)
33
+ task=C
34
+ func_task_name=C
35
+ resample_voxel_size=False
36
+ resample_post_glmsingle=False
37
+ load_from_resampled_file=True
38
+ remove_close_to_MST=False
39
+ remove_random_n=False
40
+ resampled_vox_size=2.0
41
+ resample_method="trilinear"
42
+ # Convert decimal point to underscore
43
+ vox_dim_str=${resampled_vox_size//./_}
44
+
45
+ # model_name="${sub}_multi_task-${task}_bs24_MST_rishab_${split}split"
46
+ model_name="${sub}_${session}_task-${task}_bs24_MST_rishab_${split}split_finetune_rtpreproc_unionmask"
47
+ # model_name="${sub}_${session}_task-${task}_bs24_MST_rishab_${split}split_resampled_${vox_dim_str}mm_${resample_method}_seed${SLURM_ARRAY_TASK_ID}"
48
+ main_script="main-finetune-rt-preproc"
49
+ # glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle-multi"
50
+ glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_${sub}_${session_label}_task-${task}"
51
+ # ---
52
+
53
+ export NUM_GPUS=1 # Set to equal gres=gpu:#!
54
+ export BATCH_SIZE=24
55
+ export GLOBAL_BATCH_SIZE=$((BATCH_SIZE * NUM_GPUS))
56
+
57
+ # Make sure another job doesn't use the same port; here using a random number
58
+ export MASTER_PORT=$((RANDOM % (19000 - 11000 + 1) + 11000))
59
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
60
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
61
+ export COUNT_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)
62
+ echo MASTER_ADDR=${MASTER_ADDR}
63
+ echo MASTER_PORT=${MASTER_PORT}
64
+ echo WORLD_SIZE=${COUNT_NODE}
65
+
66
+ echo model_name=${model_name}
67
+
68
+ eval_dir="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}"
69
+ export SUB=${sub}
70
+ export SESSION=${session}
71
+ export SESSION_LABEL=${session_label}
72
+ export SPLIT=${split}
73
+ export TASK=${task}
74
+ export FUNC_TASK_NAME=${func_task_name}
75
+ export RESAMPLE_VOXEL_SIZE=${resample_voxel_size}
76
+ export RESAMPLE_POST_GLMSINGLE=${resample_post_glmsingle}
77
+ export LOAD_FROM_RESAMPLED_FILE=${load_from_resampled_file}
78
+ export REMOVE_CLOSE_TO_MST=${remove_close_to_MST}
79
+ export REMOVE_RANDOM_N=${remove_random_n}
80
+ export RESAMPLED_VOX_SIZE=${resampled_vox_size}
81
+ export RESAMPLE_METHOD=${resample_method}
82
+
83
+ export glmsingle_path=${glmsingle_path}
84
+ export eval_dir=${eval_dir}
85
+ export WANDB_MODE="offline"
86
+
87
+ # singlesubject finetuning
88
+ jupyter nbconvert "${main_script}.ipynb" --to python && \
89
+ accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT "${main_script}.py" --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-4 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --wandb_log --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep --seed="${SLURM_ARRAY_TASK_ID}" && \
90
+
91
+ # jupyter nbconvert recon_inference-multisession.ipynb --to python && \
92
+ # python recon_inference-multisession.py --model_name=${model_name} --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4 --glmsingle_path="${glmsingle_path}" && \
93
+
94
+ # #jupyter nbconvert recon_inference_orig.ipynb --to python && \
95
+ # #python recon_inference_orig.py --model_name=${model_name} --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4 && \
96
+
97
+ # jupyter nbconvert enhanced_recon_inference.ipynb --to python && \
98
+ # python enhanced_recon_inference.py --model_name=${model_name} --all_recons_path=${eval_dir}/${model_name}_all_recons.pt && \
99
+
100
+ # #jupyter nbconvert enhanced_recon_inference_orig.ipynb --to python && \
101
+ # #python enhanced_recon_inference_orig.py --model_name=${model_name} && \
102
+
103
+ # jupyter nbconvert final_evaluations.ipynb --to python && \
104
+ # python final_evaluations.py --model_name=${model_name} --all_recons_path=${eval_dir}/all_enhancedrecons.pt --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --eval_dir=${eval_dir} && \
105
+
106
+ #jupyter nbconvert final_evaluations_orig.ipynb --to python && \
107
+ #python final_evaluations_orig.py --model_name=${model_name} --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}/${model_name}_all_recons.pt --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --eval_dir=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}
108
+
109
+ echo "Remember to sync wandb logs with online node!"
run_all_nb.slurm ADDED
@@ -0,0 +1,64 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=ses-01_old
3
+ #SBATCH --ntasks-per-node=1
4
+ #SBATCH --nodes=1
5
+ #SBATCH --gres=gpu:1
6
+ ##SBATCH --constraint=gpu80
7
+ #SBATCH --gpus-per-task=1 # Set to equal gres=gpu:#!
8
+ #SBATCH --cpus-per-task=40 # 40 / 80 / 176 distributed across node
9
+ #SBATCH --time=00:20:00 # total run time limit (HH:MM:SS)
10
+ #SBATCH -e slurms/%j.err # first create a "slurms" folder in current directory to store logs
11
+ #SBATCH -o slurms/%j.out
12
+ #SBATCH --no-requeue
13
+ #SBATCH --array=0 # 0-9
14
+ #SBATCH --mail-type=END
15
+ #SBATCH --mail-user=[email protected]
16
+
17
+ echo "My SLURM_ARRAY_JOB_ID is ${SLURM_ARRAY_JOB_ID}"
18
+ echo "My SLURM_ARRAY_TASK_ID is ${SLURM_ARRAY_TASK_ID}"
19
+ echo "Executing on the machine: $(hostname)"
20
+
21
+ module purge
22
+ module load anaconda3/2023.3
23
+ conda activate rt_mindEye2
24
+ #source /scratch/gpfs/ri4541/MindEyeV2/src/fmri/bin/activate
25
+
26
+ # verify these variables before submitting
27
+ # ---
28
+ sub=sub-001
29
+ session=ses-01
30
+ split=MST # MST train/test split, alternative would be train on non-repeats and test on images that repeat (split=orig)
31
+ model_name=${sub}_${session}_bs24_MST_rishab_${split}split_old
32
+ main_script='main-multisession_old'
33
+ # ---
34
+
35
+ jupyter nbconvert "${main_script}.ipynb" --to python
36
+
37
+ export NUM_GPUS=1 # Set to equal gres=gpu:#!
38
+ export BATCH_SIZE=24
39
+ export GLOBAL_BATCH_SIZE=$((BATCH_SIZE * NUM_GPUS))
40
+
41
+ # Make sure another job doesn't use the same port; here using a random number
42
+ export MASTER_PORT=$((RANDOM % (19000 - 11000 + 1) + 11000))
43
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
44
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
45
+ export COUNT_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)
46
+ echo MASTER_ADDR=${MASTER_ADDR}
47
+ echo MASTER_PORT=${MASTER_PORT}
48
+ echo WORLD_SIZE=${COUNT_NODE}
49
+
50
+ # singlesubject finetuning
51
+ echo model_name=${model_name}
52
+ #accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT "${main_script}.py" --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-4 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep && \
53
+
54
+ #jupyter nbconvert recon_inference-multisession-simple.ipynb --to python && \
55
+ #python recon_inference-multisession-simple.py --model_name=${model_name} --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4 --glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_${session}" && \
56
+
57
+ #jupyter nbconvert recon_inference-multisession.ipynb --to python && \
58
+ #python recon_inference-multisession.py --model_name=${model_name} --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4 --glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_${session}_paul" && \
59
+
60
+ jupyter nbconvert enhanced_recon_inference.ipynb --to python && \
61
+ python enhanced_recon_inference.py --model_name=${model_name} --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}/${model_name}_all_recons.pt && \
62
+
63
+ jupyter nbconvert final_evaluations.ipynb --to python && \
64
+ python final_evaluations.py --model_name=${model_name} --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}/all_enhancedrecons.pt --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --eval_dir=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}
run_all_workstation.sh ADDED
@@ -0,0 +1,42 @@
1
+ #!/bin/bash
2
+ echo "Executing on the machine: $(hostname)"
3
+
4
+ module purge
5
+ source ~/rt_mindeye/bin/activate
6
+
7
+ # verify these variables before submitting
8
+ # ---
9
+ sub=sub-001
10
+ session=ses-01
11
+ split=MST # MST train/test split, alternative would be train on non-repeats and test on images that repeat (split=orig)
12
+ model_name="${sub}_${session}_bs24_MST_rishab_${split}split"
13
+ main_script='main-multisession'
14
+ # ---
15
+
16
+ export NUM_GPUS=1 # Set to equal gres=gpu:#!
17
+ export BATCH_SIZE=24
18
+ export GLOBAL_BATCH_SIZE=$((BATCH_SIZE * NUM_GPUS))
19
+
20
+ # Make sure another job doesn't use the same port; here using a random number
21
+ export MASTER_PORT=$((RANDOM % (19000 - 11000 + 1) + 11000))
22
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
23
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
24
+ export COUNT_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)
25
+ echo MASTER_ADDR=${MASTER_ADDR}
26
+ echo MASTER_PORT=${MASTER_PORT}
27
+ echo WORLD_SIZE=${COUNT_NODE}
28
+
29
+ echo model_name=${model_name}
30
+
31
+ # singlesubject finetuning
32
+ jupyter nbconvert "${main_script}.ipynb" --to python && \
33
+ accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT "${main_script}.py" --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-4 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --wandb_log --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep --seed="${SLURM_ARRAY_TASK_ID}" && \
34
+
35
+ jupyter nbconvert recon_inference-multisession.ipynb --to python && \
36
+ python recon_inference-multisession.py --model_name=${model_name} --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4 --glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_${session}_paul" && \
37
+
38
+ jupyter nbconvert enhanced_recon_inference.ipynb --to python && \
39
+ python enhanced_recon_inference.py --model_name=${model_name} --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}/${model_name}_all_recons.pt && \
40
+
41
+ jupyter nbconvert final_evaluations.ipynb --to python && \
42
+ python final_evaluations.py --model_name=${model_name} --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}/all_enhancedrecons.pt --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --eval_dir=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}
run_main_finetune.slurm ADDED
@@ -0,0 +1,90 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=sub-005_ses-01-03_finetune_unionmask
3
+ #SBATCH --ntasks-per-node=1
4
+ #SBATCH --nodes=1
5
+ #SBATCH --gres=gpu:1
6
+ #SBATCH --constraint=gpu80
7
+ #SBATCH --gpus-per-task=1 # Set to equal gres=gpu:#!
8
+ #SBATCH --cpus-per-task=40 # 40 / 80 / 176 distributed across node
9
+ #SBATCH --time=02:35:00 # total run time limit (HH:MM:SS)
10
+ #SBATCH -e slurms/%A_%a.err # first create a "slurms" folder in current directory to store logs
11
+ #SBATCH -o slurms/%A_%a.out
12
+ #SBATCH --no-requeue
13
+ #SBATCH --array=0 # 0 or 0-9
14
+ #SBATCH --mail-type=END
15
+ #SBATCH --mail-user=[email protected]
16
+
17
+ echo "My SLURM_ARRAY_JOB_ID is ${SLURM_ARRAY_JOB_ID}"
18
+ echo "My SLURM_ARRAY_TASK_ID is ${SLURM_ARRAY_TASK_ID}"
19
+ echo "Executing on the machine: $(hostname)"
20
+
21
+ module purge
22
+ module load anaconda3/2023.3
23
+ module load fsl/6.0.6.2
24
+ conda activate rt_mindEye2
25
+ # source /scratch/gpfs/ri4541/MindEyeV2/src/fmri/bin/activate
26
+
27
+ # verify these variables before submitting
28
+ # ---
29
+ sub="sub-005"
30
+ session="all"
31
+ session_label='ses-01-03'
32
+ split=MST # MST train/test split, alternative would be train on non-repeats and test on images that repeat (split=orig)
33
+ task=C
34
+ func_task_name=C
35
+ # resample_voxel_size=True
36
+ # resample_post_glmsingle=False
37
+ # load_from_resampled_file=True
38
+ # remove_close_to_MST=False
39
+ # remove_random_n=False
40
+ # resampled_vox_size=2.0
41
+ # resample_method="trilinear"
42
+ # # Convert decimal point to underscore
43
+ # vox_dim_str=${resampled_vox_size//./_}
44
+
45
+ # model_name="${sub}_multi_task-${task}_bs24_MST_rishab_${split}split"
46
+ model_name="${sub}_${session_label}_task-${task}_bs24_MST_rishab_${split}split_unionmask_ses-01-03_finetune"
47
+ main_script="main-finetune"
48
+ # glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle-multi"
49
+ glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_${sub}_${session_label}_task-${task}"
50
+ # ---
51
+
52
+ export NUM_GPUS=1 # Set to equal gres=gpu:#!
53
+ export BATCH_SIZE=24
54
+ export GLOBAL_BATCH_SIZE=$((BATCH_SIZE * NUM_GPUS))
55
+
56
+ Make sure another job doesnt use same port, here using random number
57
+ export MASTER_PORT=$((RANDOM % (19000 - 11000 + 1) + 11000))
58
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
59
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
60
+ export COUNT_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)
61
+ echo MASTER_ADDR=${MASTER_ADDR}
62
+ echo MASTER_PORT=${MASTER_PORT}
63
+ echo WORLD_SIZE=${COUNT_NODE}
64
+
65
+ echo model_name=${model_name}
66
+
67
+ eval_dir="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}"
68
+ export sub=${sub}
69
+ export session=${session}
70
+ export session_label=${session_label}
71
+ export SPLIT=${split}
72
+ export task=${task}
73
+ export FUNC_TASK_NAME=${func_task_name}
74
+ export RESAMPLE_VOXEL_SIZE=${resample_voxel_size}
75
+ export RESAMPLE_POST_GLMSINGLE=${resample_post_glmsingle}
76
+ export LOAD_FROM_RESAMPLED_FILE=${load_from_resampled_file}
77
+ export REMOVE_CLOSE_TO_MST=${remove_close_to_MST}
78
+ export REMOVE_RANDOM_N=${remove_random_n}
79
+ export RESAMPLED_VOX_SIZE=${resampled_vox_size}
80
+ export RESAMPLE_METHOD=${resample_method}
81
+
82
+ export glmsingle_path=${glmsingle_path}
83
+ export eval_dir=${eval_dir}
84
+ export WANDB_MODE="offline"
85
+
86
+ # singlesubject finetuning
87
+ jupyter nbconvert "${main_script}.ipynb" --to python && \
88
+ accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT "${main_script}.py" --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-4 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --wandb_log --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep --seed="${SLURM_ARRAY_TASK_ID}" && \
89
+
90
+ echo "Remember to sync wandb logs with online node!"
unit_test.py ADDED
@@ -0,0 +1,234 @@
1
+ import numpy as np
2
+ import pytest
3
+ from utils import filter_and_average_mst, verify_image_patterns, compute_vox_rels, compute_avg_repeat_corrs
4
+
5
+ # === filter_and_average_mst tests ===
6
+
7
+ def test_no_mst_images():
8
+ vox = np.array([[1,2,3], [4,5,6], [7,8,9]])
9
+ vox_image_dict = {0: 'image1.jpg', 1: 'image2.jpg', 2: 'image3.jpg'}
10
+
11
+ filtered_vox, kept_indices = filter_and_average_mst(vox, vox_image_dict)
12
+
13
+ np.testing.assert_array_equal(filtered_vox, vox)
14
+ np.testing.assert_array_equal(kept_indices, [0, 1, 2])
15
+
16
+ def test_single_mst_image_set():
17
+ vox = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
18
+ vox_image_dict = {0: 'image1.jpg', 1: 'MST_pairs/image2.jpg', 2: 'image3.jpg', 3: 'MST_pairs/image2.jpg'}
19
+
20
+ filtered_vox, kept_indices = filter_and_average_mst(vox, vox_image_dict)
21
+
22
+ expected_vox = np.array([[1,2,3], [7,8,9], [7,8,9]])
23
+ expected_indices = [0, 1, 2]
24
+
25
+ np.testing.assert_array_equal(filtered_vox, expected_vox)
26
+ np.testing.assert_array_equal(kept_indices, expected_indices)
27
+
28
+ def test_multiple_mst_image_sets():
29
+ vox = np.array([[1,2,3], [4,5,6], [7,8,9], [7,8,9], [10,11,12], [12,15,12]])
30
+ vox_image_dict = {
31
+ 0: 'image1.jpg',
32
+ 1: 'MST_pairs/image2.jpg',
33
+ 2: 'image3.jpg',
34
+ 3: 'MST_pairs/image2.jpg',
35
+ 4: 'MST_pairs/image4.jpg',
36
+ 5: 'MST_pairs/image4.jpg'
37
+ }
38
+
39
+ filtered_vox, kept_indices = filter_and_average_mst(vox, vox_image_dict)
40
+
41
+ expected_vox = np.array([[1,2,3], [5.5, 6.5, 7.5], [7,8,9], [11,13,12]])
42
+ expected_indices = [0, 1, 2, 4]
43
+
44
+ np.testing.assert_array_equal(filtered_vox, expected_vox)
45
+ np.testing.assert_array_equal(kept_indices, expected_indices)
46
+
47
+ def test_empty_input():
48
+ vox = np.array([])
49
+ vox_image_dict = {}
50
+
51
+ filtered_vox, kept_indices = filter_and_average_mst(vox, vox_image_dict)
52
+
53
+ assert len(filtered_vox) == 0
54
+ assert len(kept_indices) == 0
55
+
56
+ def test_input_shape():
57
+ vox = np.random.rand(5, 3)
58
+ vox_image_dict = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e'}
59
+
60
+ filtered_vox, _ = filter_and_average_mst(vox, vox_image_dict)
61
+
62
+ assert filtered_vox.shape[1] == vox.shape[1]
63
+
64
+
65
+ # === verify_image_patterns tests ===
66
+
67
+ def test_valid_special515():
68
+ image_to_indices = {
69
+ "all_stimuli/special515/image1.jpg": [[1, 2, 3], []],
70
+ "all_stimuli/special515/image2.jpg": [[], [10, 11, 12]],
71
+ }
72
+ failures = verify_image_patterns(image_to_indices)
73
+ assert failures == []
74
+
75
+ def test_invalid_special515():
76
+ image_to_indices = {
77
+ "all_stimuli/special515/image1.jpg": [[1, 2], []],
78
+ "all_stimuli/special515/image2.jpg": [[1, 2], [3]],
79
+ }
80
+ failures = verify_image_patterns(image_to_indices)
81
+ assert len(failures) == 2
82
+
83
+ def test_valid_MST_pairs():
84
+ image_to_indices = {
85
+ "all_stimuli/MST_pairs/image1.png": [[4, 5], [6, 7]],
86
+ }
87
+ failures = verify_image_patterns(image_to_indices)
88
+ assert failures == []
89
+
90
+ def test_invalid_MST_pairs():
91
+ image_to_indices = {
92
+ "all_stimuli/MST_pairs/image1.png": [[4, 5, 6], [7]],
93
+ }
94
+ failures = verify_image_patterns(image_to_indices)
95
+ assert len(failures) == 1
96
+
97
+ def test_valid_other_images():
98
+ image_to_indices = {
99
+ "all_stimuli/other/image1.png": [[123], []],
100
+ "all_stimuli/other/image2.png": [[], [456]],
101
+ }
102
+ failures = verify_image_patterns(image_to_indices)
103
+ assert failures == []
104
+
105
+ def test_invalid_other_images():
106
+ image_to_indices = {
107
+ "all_stimuli/other/image1.png": [[123, 124], []],
108
+ "all_stimuli/other/image2.png": [[123], [456]],
109
+ }
110
+ failures = verify_image_patterns(image_to_indices)
111
+ assert len(failures) == 2
112
+
113
+
114
+ # === compute_vox_rels tests ===
115
+
116
+ # def test_reliability_two_repeats():
117
+ # np.random.seed(0)
118
+ # vox = np.random.rand(70, 10) # 50 trials, 10 voxels
119
+ # pairs = [
120
+ # [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14],
121
+ # [15, 16, 17], [18, 19, 20], [21, 22, 23], [24, 25, 26], [27, 28, 29],
122
+ # [30, 31, 32], [33, 34, 35], [36, 37, 38], [39, 40, 41], [42, 43, 44],
123
+ # [45, 46, 47], [48, 49, 50], [51, 52, 53], [54, 55, 56],
124
+ # [57, 58, 59, 60], [61, 62, 63, 64]
125
+ # ]
126
+
127
+ # rels = compute_vox_rels(vox, pairs, "sub-01", "ses-01")
128
+
129
+ # assert rels.shape == (10,)
130
+ # assert not np.all(np.isnan(rels)), "All voxel reliabilities are NaN!"
131
+ # assert np.all((rels >= -1) & (rels <= 1))
132
+
133
+
134
+ # def test_reliability_three_repeats():
135
+ # np.random.seed(1)
136
+ # vox = np.random.rand(15, 3) # 15 trials, 3 voxels
137
+ # pairs = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
138
+
139
+ # rels = compute_vox_rels(vox, pairs, "sub-01", "ses-02")
140
+
141
+ # assert rels.shape == (3,)
142
+ # assert not np.all(np.isnan(rels)), "All voxel reliabilities are NaN!"
143
+ # assert np.all((rels >= -1) & (rels <= 1))
144
+
145
+
146
+ # def test_reliability_four_repeats_mixed():
147
+ # np.random.seed(2)
148
+ # vox = np.random.rand(20, 4) # 20 trials, 4 voxels
149
+ # pairs = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] # includes 2 and 4 repeats
150
+
151
+ # rels = compute_vox_rels(vox, pairs, "sub-test", "ses-test")
152
+
153
+ # assert rels.shape == (4,)
154
+ # assert not np.all(np.isnan(rels)), "All voxel reliabilities are NaN!"
155
+ # assert np.all((rels >= -1) & (rels <= 1))
156
+
157
+
158
+ # def test_near_uniform_data():
159
+ # np.random.seed(42)
160
+ # # Add very small noise to a constant baseline
161
+ # vox = np.ones((6, 3)) + np.random.normal(0, 1e-5, (6, 3))
162
+ # pairs = [[0, 1], [2, 3], [4, 5]]
163
+
164
+ # rels = compute_vox_rels(vox, pairs, "sub-near-uniform", "ses-01")
165
+
166
+ # assert rels.shape == (3,)
167
+ # assert not np.all(np.isnan(rels)), "All voxel reliabilities are NaN!"
168
+ # assert np.all((rels >= -1) & (rels <= 1))
169
+
170
+ # def test_invalid_pairs_length():
171
+ # vox = np.random.rand(10, 3)
172
+ # pairs = [[0]] # should raise due to too few repeats
173
+
174
+ # with pytest.raises(AssertionError):
175
+ # compute_vox_rels(vox, pairs, "sub-err", "ses-01")
176
+
177
+
178
+ def test_basic_case():
179
+ """Test with 2 repeats and 2 voxels, with basic correlation"""
180
+ vox_repeats = np.random.rand(30, 50)
182
+ rels = compute_avg_repeat_corrs(vox_repeats)
183
+
184
+ # Expected correlation for each voxel should be the correlation between repeat 0 and repeat 1
185
+ assert rels.shape == (2,) # Should return a vector of size 2 (one per voxel)
186
+
187
+ # Check that the correlation is valid and close to expected value
188
+ assert np.all(np.isfinite(rels)) # Ensure no NaNs in the results
189
+
190
+ for v in range(2): # Check correlation for each voxel
191
+ expected_corr = np.corrcoef(vox_repeats[:, v])[0, 1]
192
+ assert np.allclose(rels[v], expected_corr, atol=1e-5) # Allow for floating point errors
193
+
194
+ def test_multiple_repeats():
195
+ """Test with more repeats (3) and multiple voxels (3)"""
196
+ vox_repeats = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]]) # 3 repeats, 3 voxels
197
+ rels = compute_avg_repeat_corrs(vox_repeats)
198
+
199
+ assert rels.shape == (3,) # Should return a vector of size 3 (one per voxel)
200
+ for v in range(3):
201
+ assert not np.isnan(rels[v]) # Ensure no NaNs are present
202
+
203
+ def test_identical_repeats():
204
+ """Test with all identical repeats (perfect correlation)"""
205
+ vox_repeats = np.array([[1, 1], [1, 1]]) # Identical repeats, 2 voxels
206
+ rels = compute_avg_repeat_corrs(vox_repeats)
207
+
208
+ assert rels.shape == (2,)
209
+ assert np.allclose(rels, 1) # Perfect correlation (should be 1 for all voxels)
210
+
211
+ def test_anticorrelation():
212
+ """Test with perfect anti-correlation (correlation = -1)"""
213
+ vox_repeats = np.array([[1, 2], [2, 1]]) # Perfect anti-correlation between repeats
214
+ rels = compute_avg_repeat_corrs(vox_repeats)
215
+
216
+ assert rels.shape == (2,)
217
+ assert np.allclose(rels, -1) # Perfect negative correlation
218
+
219
+ def test_zero_variance_repeats():
220
+ """Test with repeats having zero variance (e.g., all values are the same)"""
221
+ vox_repeats = np.array([[1, 1], [1, 1], [1, 1]]) # Zero variance across repeats
222
+ rels = compute_avg_repeat_corrs(vox_repeats)
223
+
224
+ assert rels.shape == (2,)
225
+ # Since variance is zero, the correlation will be NaN
226
+ assert np.all(np.isnan(rels))
227
+
228
+ def test_edge_case_two_repeats_and_one_voxel():
229
+ """Test with only 2 repeats and 1 voxel (minimal edge case)"""
230
+ vox_repeats = np.array([[1], [2]]) # 2 repeats, 1 voxel
231
+ rels = compute_avg_repeat_corrs(vox_repeats)
232
+
233
+ assert rels.shape == (1,)
234
+ assert np.allclose(rels[0], np.corrcoef([1], [2])[1, 0]) # Correlation between the two repeats
utils.py ADDED
@@ -0,0 +1,1151 @@
1
+ import numpy as np
2
+ from torchvision import transforms
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import PIL
7
+ import random
8
+ import os
9
+ import matplotlib.pyplot as plt
10
+ import pandas as pd
11
+ import math
12
+ import webdataset as wds
13
+ import tempfile
14
+ from torchvision.utils import make_grid
15
+
16
+ import json
17
+ from torchmetrics.image.fid import FrechetInceptionDistance
18
+ from PIL import Image
19
+ import requests
20
+ import io
21
+ import time
22
+
23
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
24
+
25
+ def is_interactive():
26
+ import __main__ as main
27
+ return not hasattr(main, '__file__')
28
+
29
+ def seed_everything(seed=0, cudnn_deterministic=True):
30
+ random.seed(seed)
31
+ os.environ['PYTHONHASHSEED'] = str(seed)
32
+ np.random.seed(seed)
33
+ torch.manual_seed(seed)
34
+ torch.cuda.manual_seed(seed)
35
+ torch.cuda.manual_seed_all(seed)
36
+ if cudnn_deterministic:
37
+ torch.backends.cudnn.deterministic = True
38
+ else:
39
+ ## needs to be False to use conv3D
40
+ print('Note: not using cudnn.deterministic')
41
+
42
+ def np_to_Image(x):
43
+ if x.ndim==4:
44
+ x=x[0]
45
+ return PIL.Image.fromarray((x.transpose(1, 2, 0)*127.5+128).clip(0,255).astype('uint8'))
46
+
47
+ def torch_to_Image(x):
48
+ if x.ndim==4:
49
+ x=x[0]
50
+ return transforms.ToPILImage()(x)
51
+
52
+ def Image_to_torch(x):
53
+ try:
54
+ x = (transforms.ToTensor()(x)[:3].unsqueeze(0)-.5)/.5
55
+ except:
56
+ x = (transforms.ToTensor()(x[0])[:3].unsqueeze(0)-.5)/.5
57
+ return x
58
+
59
+ def torch_to_matplotlib(x,device=device):
60
+ if torch.mean(x)>10:
61
+ x = (x.permute(0, 2, 3, 1)).clamp(0, 255).to(torch.uint8)
62
+ else:
63
+ x = (x.permute(0, 2, 3, 1) * 255).clamp(0, 255).to(torch.uint8)
64
+ if device=='cpu':
65
+ return x[0]
66
+ else:
67
+ return x.cpu().numpy()[0]
68
+
69
+ def pairwise_cosine_similarity(A, B, dim=1, eps=1e-8):
70
+ #https://stackoverflow.com/questions/67199317/pytorch-cosine-similarity-nxn-elements
71
+ numerator = A @ B.T
72
+ A_l2 = torch.mul(A, A).sum(axis=dim)
73
+ B_l2 = torch.mul(B, B).sum(axis=dim)
74
+ denominator = torch.max(torch.sqrt(torch.outer(A_l2, B_l2)), torch.tensor(eps))
75
+ return torch.div(numerator, denominator)
76
+
77
+ def batchwise_pearson_correlation(Z, B):
78
+ # Calculate means
79
+ Z_mean = torch.mean(Z, dim=1, keepdim=True)
80
+ B_mean = torch.mean(B, dim=1, keepdim=True)
81
+
82
+ # Subtract means
83
+ Z_centered = Z - Z_mean
84
+ B_centered = B - B_mean
85
+
86
+ # Calculate Pearson correlation coefficient
87
+ numerator = Z_centered @ B_centered.T
88
+ Z_centered_norm = torch.linalg.norm(Z_centered, dim=1, keepdim=True)
89
+ B_centered_norm = torch.linalg.norm(B_centered, dim=1, keepdim=True)
90
+ denominator = Z_centered_norm @ B_centered_norm.T
91
+
92
+ pearson_correlation = (numerator / denominator)
93
+ return pearson_correlation
94
+
95
+ def batchwise_cosine_similarity(Z,B):
96
+ Z = Z.flatten(1)
97
+ B = B.flatten(1).T
98
+ Z_norm = torch.linalg.norm(Z, dim=1, keepdim=True) # Size (n, 1).
99
+ B_norm = torch.linalg.norm(B, dim=0, keepdim=True) # Size (1, b).
100
+ cosine_similarity = ((Z @ B) / (Z_norm @ B_norm)).T
101
+ return cosine_similarity
102
+
103
+ def prenormed_batchwise_cosine_similarity(Z,B):
104
+ return (Z @ B.T).T
105
+
106
+ def cosine_similarity(Z,B,l=0):
107
+ Z = nn.functional.normalize(Z, p=2, dim=1)
108
+ B = nn.functional.normalize(B, p=2, dim=1)
109
+ # if l>0, use distribution normalization
110
+ # https://twitter.com/YifeiZhou02/status/1716513495087472880
111
+ Z = Z - l * torch.mean(Z,dim=0)
112
+ B = B - l * torch.mean(B,dim=0)
113
+ cosine_similarity = (Z @ B.T).T
114
+ return cosine_similarity
115
+
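+ # top-k retrieval accuracy: fraction of rows whose paired label index appears among the k largest similarities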
116
+ def topk(similarities,labels,k=5):
117
+ if k > similarities.shape[0]:
118
+ k = similarities.shape[0]
119
+ topsum=0
120
+ for i in range(k):
121
+ topsum += torch.sum(torch.argsort(similarities,axis=1)[:,-(i+1)] == labels)/len(labels)
122
+ return topsum
123
+
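+ # return the similarity matrix with its diagonal set to -1 while keeping the off-diagonal entries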
124
+ def get_non_diagonals(a):
125
+ a = torch.triu(a,diagonal=1)+torch.tril(a,diagonal=-1)
126
+ # make diagonals -1
127
+ a=a.fill_diagonal_(-1)
128
+ return a
129
+
130
+ def gather_features(image_features, voxel_features, accelerator):
131
+ all_image_features = accelerator.gather(image_features.contiguous())
132
+ if voxel_features is not None:
133
+ all_voxel_features = accelerator.gather(voxel_features.contiguous())
134
+ return all_image_features, all_voxel_features
135
+ return all_image_features
136
+
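+ # soft CLIP contrastive loss: the softmaxed CLIP-to-CLIP similarity matrix serves as soft targets for the brain-to-CLIP similarities, averaged over both directions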
137
+ def soft_clip_loss(preds, targs, temp=0.125): #, distributed=False, accelerator=None):
138
+ # if not distributed:
139
+ clip_clip = (targs @ targs.T)/temp
140
+ brain_clip = (preds @ targs.T)/temp
141
+ # else:
142
+ # all_targs = gather_features(targs, None, accelerator)
143
+ # clip_clip = (targs @ all_targs.T)/temp
144
+ # brain_clip = (preds @ all_targs.T)/temp
145
+
146
+ loss1 = -(brain_clip.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
147
+ loss2 = -(brain_clip.T.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
148
+
149
+ loss = (loss1 + loss2)/2
150
+ return loss
151
+
152
+ def soft_siglip_loss(preds, targs, temp, bias):
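+ # SigLIP-style sigmoid pairwise loss with soft labels derived from the CLIP-target similarity matrix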
153
+ temp = torch.exp(temp)
154
+
155
+ logits = (preds @ targs.T) * temp + bias
156
+ # diagonals (aka paired samples) should be >0 and off-diagonals <0
157
+ labels = (targs @ targs.T) - 1 + (torch.eye(len(targs)).to(targs.dtype).to(targs.device))
158
+
159
+ loss1 = -torch.sum(nn.functional.logsigmoid(logits * labels[:len(preds)])) / len(preds)
160
+ loss2 = -torch.sum(nn.functional.logsigmoid(logits.T * labels[:,:len(preds)])) / len(preds)
161
+ loss = (loss1 + loss2)/2
162
+ return loss
163
+
164
+ def mixco_hard_siglip_loss(preds, targs, temp, bias, perm, betas):
165
+ temp = torch.exp(temp)
166
+
167
+ probs = torch.diag(betas)
168
+ probs[torch.arange(preds.shape[0]).to(preds.device), perm] = 1 - betas
169
+
170
+ logits = (preds @ targs.T) * temp + bias
171
+ labels = probs * 2 - 1
172
+ #labels = torch.eye(len(targs)).to(targs.dtype).to(targs.device) * 2 - 1
173
+
174
+ loss1 = -torch.sum(nn.functional.logsigmoid(logits * labels)) / len(preds)
175
+ loss2 = -torch.sum(nn.functional.logsigmoid(logits.T * labels)) / len(preds)
176
+ loss = (loss1 + loss2)/2
177
+ return loss
178
+
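+ # MixCo augmentation: mix a random subset of samples with a shuffled copy using Beta(beta, beta) weights; returns the mixed voxels plus the permutation, weights, and selection mask needed by mixco_nce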
179
+ def mixco(voxels, beta=0.15, s_thresh=0.5, perm=None, betas=None, select=None):
180
+ if perm is None:
181
+ perm = torch.randperm(voxels.shape[0])
182
+ voxels_shuffle = voxels[perm].to(voxels.device,dtype=voxels.dtype)
183
+ if betas is None:
184
+ betas = torch.distributions.Beta(beta, beta).sample([voxels.shape[0]]).to(voxels.device,dtype=voxels.dtype)
185
+ if select is None:
186
+ select = (torch.rand(voxels.shape[0]) <= s_thresh).to(voxels.device)
187
+ betas_shape = [-1] + [1]*(len(voxels.shape)-1)
188
+ voxels[select] = voxels[select] * betas[select].reshape(*betas_shape) + \
189
+ voxels_shuffle[select] * (1 - betas[select]).reshape(*betas_shape)
190
+ betas[~select] = 1
191
+ return voxels, perm, betas, select
192
+
193
+ def mixco_clip_target(clip_target, perm, select, betas):
194
+ clip_target_shuffle = clip_target[perm]
195
+ clip_target[select] = clip_target[select] * betas[select].reshape(-1, 1) + \
196
+ clip_target_shuffle[select] * (1 - betas[select]).reshape(-1, 1)
197
+ return clip_target
198
+
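+ # MixCo-aware InfoNCE: with mixing metadata (perm/betas/select) the targets are the soft mixture proportions; otherwise this reduces to standard (bidirectional) InfoNCE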
199
+ def mixco_nce(preds, targs, temp=0.1, perm=None, betas=None, select=None, distributed=False,
200
+ accelerator=None, local_rank=None, bidirectional=True):
201
+ brain_clip = (preds @ targs.T)/temp
202
+
203
+ if perm is not None and betas is not None and select is not None:
204
+ probs = torch.diag(betas)
205
+ probs[torch.arange(preds.shape[0]).to(preds.device), perm] = 1 - betas
206
+
207
+ loss = -(brain_clip.log_softmax(-1) * probs).sum(-1).mean()
208
+ if bidirectional:
209
+ loss2 = -(brain_clip.T.log_softmax(-1) * probs.T).sum(-1).mean()
210
+ loss = (loss + loss2)/2
211
+ return loss
212
+ else:
213
+ loss = F.cross_entropy(brain_clip, torch.arange(brain_clip.shape[0]).to(brain_clip.device))
214
+ if bidirectional:
215
+ loss2 = F.cross_entropy(brain_clip.T, torch.arange(brain_clip.shape[0]).to(brain_clip.device))
216
+ loss = (loss + loss2)/2
217
+ return loss
218
+
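+ # --- Illustrative usage sketch (`model` is a hypothetical voxels->CLIP mapper; not part
+ # of the original training loop) ---
+ # mixco mixes each voxel pattern in place with a random partner (mixing weights `betas`),
+ # and mixco_nce reuses `perm`/`betas`/`select` to build the matching soft targets.
+ def _example_mixco_step(model, voxels, clip_target):
+     voxels_mixed, perm, betas, select = mixco(voxels.clone())
+     preds = nn.functional.normalize(model(voxels_mixed).flatten(1), dim=-1)
+     targs = nn.functional.normalize(clip_target.flatten(1), dim=-1)
+     return mixco_nce(preds, targs, perm=perm, betas=betas, select=select)
+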
219
+ def count_params(model):
220
+ total = sum(p.numel() for p in model.parameters())
221
+ trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
222
+ print('param counts:\n{:,} total\n{:,} trainable'.format(total, trainable))
223
+ return trainable
224
+
225
+ def image_grid(imgs, rows, cols):
226
+ w, h = imgs[0].size
227
+ grid = PIL.Image.new('RGB', size=(cols*w, rows*h))
228
+ for i, img in enumerate(imgs):
229
+ grid.paste(img, box=(i%cols*w, i//cols*h))
230
+ return grid
231
+
232
+ def check_loss(loss):
233
+ if loss.isnan().any():
234
+ raise ValueError('NaN loss')
235
+
236
+ def cosine_anneal(start, end, steps):
237
+ return end + (start - end)/2 * (1 + torch.cos(torch.pi*torch.arange(steps)/(steps-1)))
238
+
239
+ def resize(img, img_size=128):
240
+ if img.ndim == 3: img = img[None]
241
+ return nn.functional.interpolate(img, size=(img_size, img_size), mode='nearest')
242
+
243
+ import braceexpand
244
+ def get_dataloaders(
245
+ batch_size,
246
+ image_var='images',
247
+ num_devices=None,
248
+ num_workers=None,
249
+ train_url=None,
250
+ val_url=None,
251
+ meta_url=None,
252
+ num_train=None,
253
+ num_val=None,
254
+ cache_dir="/scratch/tmp/wds-cache",
255
+ seed=0,
256
+ voxels_key="nsdgeneral.npy",
257
+ val_batch_size=None,
258
+ to_tuple=["voxels", "images", "trial"],
259
+ local_rank=0,
260
+ world_size=1,
261
+ ):
262
+ print("Getting dataloaders...")
263
+ assert image_var == 'images'
264
+
265
+ def my_split_by_node(urls):
266
+ return urls
267
+
268
+ train_url = list(braceexpand.braceexpand(train_url))
269
+ val_url = list(braceexpand.braceexpand(val_url))
270
+
271
+ if num_devices is None:
272
+ num_devices = torch.cuda.device_count()
273
+
274
+ if num_workers is None:
275
+ num_workers = num_devices
276
+
277
+ if num_train is None:
278
+ metadata = json.load(open(meta_url))
279
+ num_train = metadata['totals']['train']
280
+ if num_val is None:
281
+ metadata = json.load(open(meta_url))
282
+ num_val = metadata['totals']['val']
283
+
284
+ if val_batch_size is None:
285
+ val_batch_size = batch_size
286
+
287
+ global_batch_size = batch_size * num_devices
288
+ num_batches = math.floor(num_train / global_batch_size)
289
+ num_worker_batches = math.floor(num_batches / num_workers)
290
+ if num_worker_batches == 0: num_worker_batches = 1
291
+
292
+ print("\nnum_train",num_train)
293
+ print("global_batch_size",global_batch_size)
294
+ print("batch_size",batch_size)
295
+ print("num_workers",num_workers)
296
+ print("num_batches",num_batches)
297
+ print("num_worker_batches", num_worker_batches)
298
+
299
+ # train_url = train_url[local_rank:world_size]
300
+ train_data = wds.WebDataset(train_url, resampled=False, cache_dir=cache_dir, nodesplitter=my_split_by_node)\
301
+ .shuffle(500, initial=500, rng=random.Random(42))\
302
+ .decode("torch")\
303
+ .rename(images="jpg;png", voxels=voxels_key, trial="trial.npy", coco="coco73k.npy", reps="num_uniques.npy")\
304
+ .to_tuple(*to_tuple)#\
305
+ # .batched(batch_size, partial=True)#\
306
+ # .with_epoch(num_worker_batches)
307
+
308
+ # BATCH SIZE SHOULD BE NONE!!! FOR TRAIN AND VAL | resampled=True for train | .batched(val_batch_size, partial=False)
309
+ train_dl = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=1, shuffle=False)
310
+
311
+ # Validation
312
+ print("val_batch_size",val_batch_size)
313
+ val_data = wds.WebDataset(val_url, resampled=False, cache_dir=cache_dir, nodesplitter=my_split_by_node)\
314
+ .shuffle(500, initial=500, rng=random.Random(42))\
315
+ .decode("torch")\
316
+ .rename(images="jpg;png", voxels=voxels_key, trial="trial.npy", coco="coco73k.npy", reps="num_uniques.npy")\
317
+ .to_tuple(*to_tuple)#\
318
+ # .batched(val_batch_size, partial=True)
319
+ val_dl = torch.utils.data.DataLoader(val_data, batch_size=val_batch_size, num_workers=1, shuffle=False, drop_last=True)
320
+
321
+ return train_dl, val_dl, num_train, num_val
322
+
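+ # --- Illustrative usage sketch (shard pattern and paths are hypothetical) ---
+ # get_dataloaders brace-expands the WebDataset tar URLs; meta_url points to a JSON file
+ # with {"totals": {"train": ..., "val": ...}} used when num_train/num_val are not given.
+ def _example_get_dataloaders(data_root, batch_size=32):
+     return get_dataloaders(
+         batch_size,
+         train_url=f"{data_root}/train/train_subj01_{{0..17}}.tar",
+         val_url=f"{data_root}/val/val_subj01_0.tar",
+         meta_url=f"{data_root}/metadata_subj01.json",
+         voxels_key="nsdgeneral.npy",
+     )
+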
323
+ pixcorr_preprocess = transforms.Compose([
324
+ transforms.Resize(425, interpolation=transforms.InterpolationMode.BILINEAR),
325
+ ])
326
+ def pixcorr(images,brains,nan=True):
327
+ all_images_flattened = pixcorr_preprocess(images).reshape(len(images), -1)
328
+ all_brain_recons_flattened = pixcorr_preprocess(brains).view(len(brains), -1)
329
+ if nan:
330
+ corrmean = torch.nanmean(torch.diag(batchwise_pearson_correlation(all_images_flattened, all_brain_recons_flattened)))
331
+ else:
332
+ corrmean = torch.mean(torch.diag(batchwise_pearson_correlation(all_images_flattened, all_brain_recons_flattened)))
333
+ return corrmean
334
+
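+ # Illustrative usage (hypothetical tensors): `images` and `recons` are [B, 3, H, W]
+ # tensors in [0, 1]; pixcorr resizes both to 425 px, flattens them, and averages the
+ # per-sample Pearson correlation (via batchwise_pearson_correlation defined earlier):
+ #     pixcorr_score = pixcorr(images, recons)
+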
335
+ def select_annotations(annots, random=True):
336
+ """
337
+ There are 5 annotations per image. Select one of them for each image.
338
+ """
339
+ for i, b in enumerate(annots):
340
+ t = ''
341
+ if random:
342
+ # select random non-empty annotation
343
+ while t == '':
344
+ rand = torch.randint(5, (1,1))[0][0]
345
+ t = b[rand]
346
+ else:
347
+ # select first non-empty annotation
348
+ for j in range(5):
349
+ if b[j] != '':
350
+ t = b[j]
351
+ break
352
+ if i == 0:
353
+ txt = np.array(t)
354
+ else:
355
+ txt = np.vstack((txt, t))
356
+ txt = txt.flatten()
357
+ return txt
358
+
359
+ def add_saturation(image, alpha=2):
360
+ gray_image = 0.2989 * image[:, 0, :, :] + 0.5870 * image[:, 1, :, :] + 0.1140 * image[:, 2, :, :]
361
+ gray_image = gray_image.unsqueeze(1).expand_as(image)
362
+ saturated_image = alpha * image + (1 - alpha) * gray_image
363
+ return torch.clamp(saturated_image, 0, 1)
364
+
365
+ def find_prompt_by_image_number(image_number, data):
366
+ target_image_filename = f"img_t{image_number}.jpg"
367
+ for entry in data:
368
+ if 'target' in entry and entry['target'].endswith(target_image_filename):
369
+ return entry['prompt']
370
+ return -1
371
+
372
+ def compute_negative_l1_losses(preds, targets):
373
+ batch_size = preds.size(0)
374
+
375
+ # Expand dimensions for broadcasting
376
+ expanded_preds = preds.unsqueeze(1) # Shape: [batch_size, 1, 100]
377
+ expanded_targets = targets.unsqueeze(0) # Shape: [1, batch_size, 100]
378
+
379
+ # Compute pairwise L1 differences
380
+ l1_diffs = torch.abs(expanded_preds - expanded_targets) # Shape: [batch_size, batch_size, 100]
381
+
382
+ # Mask the diagonal to exclude positive pairs
383
+ mask = torch.eye(batch_size).bool().to(l1_diffs.device)
384
+ l1_diffs[mask] = 0
385
+
386
+ # Sum L1 differences for each sample against all negatives
387
+ negative_losses = l1_diffs.sum(dim=-1).mean()
388
+
389
+ return negative_losses
390
+
391
+
392
+ def unclip_recon(x, diffusion_engine, vector_suffix,
393
+ num_samples=1, offset_noise_level=0.04):
394
+ from generative_models.sgm.util import append_dims
395
+ assert x.ndim==3
396
+ if x.shape[0]==1:
397
+ x = x[[0]]
398
+ with torch.no_grad(), torch.cuda.amp.autocast(dtype=torch.float16), diffusion_engine.ema_scope():
399
+ z = torch.randn(num_samples,4,96,96).to(device) # starting noise, can change to VAE outputs of initial image for img2img
400
+
401
+ # clip_img_tokenized = clip_img_embedder(image)
402
+ # tokens = clip_img_tokenized
403
+ token_shape = x.shape
404
+ tokens = x
405
+ c = {"crossattn": tokens.repeat(num_samples,1,1), "vector": vector_suffix.repeat(num_samples,1)}
406
+
407
+ tokens = torch.randn_like(x)
408
+ uc = {"crossattn": tokens.repeat(num_samples,1,1), "vector": vector_suffix.repeat(num_samples,1)}
409
+
410
+ for k in c:
411
+ c[k], uc[k] = map(lambda y: y[k][:num_samples].to(device), (c, uc))
412
+
413
+ noise = torch.randn_like(z)
414
+ sigmas = diffusion_engine.sampler.discretization(diffusion_engine.sampler.num_steps)
415
+ sigma = sigmas[0].to(z.device)
416
+
417
+ if offset_noise_level > 0.0:
418
+ noise = noise + offset_noise_level * append_dims(
419
+ torch.randn(z.shape[0], device=z.device), z.ndim
420
+ )
421
+ noised_z = z + noise * append_dims(sigma, z.ndim)
422
+ noised_z = noised_z / torch.sqrt(
423
+ 1.0 + sigmas[0] ** 2.0
424
+ ) # Note: hardcoded to DDPM-like scaling. need to generalize later.
425
+
426
+ def denoiser(x, sigma, c):
427
+ return diffusion_engine.denoiser(diffusion_engine.model, x, sigma, c)
428
+
429
+ samples_z = diffusion_engine.sampler(denoiser, noised_z, cond=c, uc=uc)
430
+ samples_x = diffusion_engine.decode_first_stage(samples_z)
431
+ samples = torch.clamp((samples_x*.8+.2), min=0.0, max=1.0)
432
+ # samples = torch.clamp((samples_x + .5) / 2.0, min=0.0, max=1.0)
433
+ return samples
434
+
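+ # --- Illustrative usage sketch (hypothetical names; assumes the SDXL unCLIP setup used
+ # elsewhere in this repo: a loaded `diffusion_engine`, a global `device`, and a
+ # precomputed `vector_suffix` conditioning vector) ---
+ def _example_unclip_recon(diffusion_engine, vector_suffix, backbone_tokens):
+     # backbone_tokens: [1, seq, dim] CLIP-space tokens predicted from brain activity
+     samples = unclip_recon(backbone_tokens, diffusion_engine, vector_suffix, num_samples=1)
+     return transforms.ToPILImage()(samples[0].cpu())
+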
435
+ def soft_cont_loss(student_preds, teacher_preds, teacher_aug_preds, temp=0.125):
436
+ teacher_teacher_aug = (teacher_preds @ teacher_aug_preds.T)/temp
437
+ teacher_teacher_aug_t = (teacher_aug_preds @ teacher_preds.T)/temp
438
+ student_teacher_aug = (student_preds @ teacher_aug_preds.T)/temp
439
+ student_teacher_aug_t = (teacher_aug_preds @ student_preds.T)/temp
440
+
441
+ loss1 = -(student_teacher_aug.log_softmax(-1) * teacher_teacher_aug.softmax(-1)).sum(-1).mean()
442
+ loss2 = -(student_teacher_aug_t.log_softmax(-1) * teacher_teacher_aug_t.softmax(-1)).sum(-1).mean()
443
+
444
+ loss = (loss1 + loss2)/2
445
+ return loss
446
+
447
+ def iterate_range(start, length, batchsize):
448
+ batch_count = int(length // batchsize )
449
+ residual = int(length % batchsize)
450
+ for i in range(batch_count):
451
+ yield range(start+i*batchsize, start+(i+1)*batchsize),batchsize
452
+ if(residual>0):
453
+ yield range(start+batch_count*batchsize,start+length),residual
454
+
455
+
456
+ # Torch fwRF
457
+ def get_value(_x):
458
+ return np.copy(_x.data.cpu().numpy())
459
+
460
+
461
+ #subject: nsd subject index between 1-8
462
+ #mode: vision, imagery
463
+ #stimtype: all, simple, complex, concepts
464
+ #average: whether to average across trials, will produce x that is (stimuli, 1, voxels)
465
+ #nest: whether to nest the data according to stimuli, will produce x that is (stimuli, trials, voxels)
466
+ import pickle
467
+ def condition_average(x, y, cond, nest=False):
468
+ idx, idx_count = np.unique(cond, return_counts=True)
469
+ idx_list = [np.array(cond)==i for i in np.sort(idx)]
470
+ if nest:
471
+ avg_x = torch.zeros((len(idx), idx_count.max(), x.shape[1]), dtype=torch.float32)
472
+ else:
473
+ avg_x = torch.zeros((len(idx), 1, x.shape[1]), dtype=torch.float32)
474
+ for i, m in enumerate(idx_list):
475
+ if nest:
476
+ avg_x[i] = x[m]
477
+ else:
478
+ avg_x[i] = torch.mean(x[m], axis=0)
479
+
480
+ return avg_x, y, len(idx_count)
481
+ def load_nsd_mental_imagery(subject, mode, stimtype="all", average=False, nest=False):
482
+ # This file has a bunch of information about the stimuli and cue associations that will make loading it easier
483
+ img_stim_file = "imagery/nsd_imagery/data/nsddata_stimuli/stimuli/nsdimagery_stimuli.pkl3"
484
+ ex_file = open(img_stim_file, 'rb')
485
+ imagery_dict = pickle.load(ex_file)
486
+ ex_file.close()
487
+ # Indicates what experiments trials belong to
488
+ exps = imagery_dict['exps']
489
+ # Indicates the cues for different stimuli
490
+ cues = imagery_dict['cues']
491
+ # Maps the cues to the stimulus image information
492
+ image_map = imagery_dict['image_map']
493
+ # Organize the indices of the trials according to the modality and the type of stimuli
494
+ cond_idx = {
495
+ 'visionsimple': np.arange(len(exps))[exps=='visA'],
496
+ 'visioncomplex': np.arange(len(exps))[exps=='visB'],
497
+ 'visionconcepts': np.arange(len(exps))[exps=='visC'],
498
+ 'visionall': np.arange(len(exps))[np.logical_or(np.logical_or(exps=='visA', exps=='visB'), exps=='visC')],
499
+ 'imagerysimple': np.arange(len(exps))[np.logical_or(exps=='imgA_1', exps=='imgA_2')],
500
+ 'imagerycomplex': np.arange(len(exps))[np.logical_or(exps=='imgB_1', exps=='imgB_2')],
501
+ 'imageryconcepts': np.arange(len(exps))[np.logical_or(exps=='imgC_1', exps=='imgC_2')],
502
+ 'imageryall': np.arange(len(exps))[np.logical_or(
503
+ np.logical_or(
504
+ np.logical_or(exps=='imgA_1', exps=='imgA_2'),
505
+ np.logical_or(exps=='imgB_1', exps=='imgB_2')),
506
+ np.logical_or(exps=='imgC_1', exps=='imgC_2'))]}
507
+ # Load normalized betas
508
+ x = torch.load("imagery/nsd_imagery/data/preprocessed_data/subject{}/nsd_imagery.pt".format(subject)).requires_grad_(False).to("cpu")
509
+ # Find the trial indices conditioned on the type of trials we want to load
510
+ cond_im_idx = {n: [image_map[c] for c in cues[idx]] for n,idx in cond_idx.items()}
511
+ conditionals = cond_im_idx[mode+stimtype]
512
+ # Stimuli file is of shape (18,3,425,425), these can be converted back into PIL images using transforms.ToPILImage()
513
+ y = torch.load("imagery/nsd_imagery/data/nsddata_stimuli/stimuli/imagery_stimuli_18.pt").requires_grad_(False).to("cpu")
514
+ # Prune the beta file down to specific experimental mode/stimuli type
515
+ x = x[cond_idx[mode+stimtype]]
516
+ # If stimtype is not all, then prune the image data down to the specific stimuli type
517
+ if stimtype == "simple":
518
+ y = y[:6]
519
+ elif stimtype == "complex":
520
+ y = y[6:12]
521
+ elif stimtype == "concepts":
522
+ y = y[12:]
523
+
524
+ # Average or nest the betas across trials
525
+ if average or nest:
526
+ x, y, sample_count = condition_average(x, y, conditionals, nest=nest)
527
+ else:
528
+ x = x.reshape((x.shape[0], 1, x.shape[1]))
529
+
530
+ # print(x.shape)
531
+ return x, y
532
+
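+ # --- Illustrative usage sketch ---
+ # With average=True, `x` is (n_stimuli, 1, n_voxels) and `y` is the (18, 3, 425, 425)
+ # stimulus tensor; the relative paths inside the function assume the working directory
+ # contains the imagery/nsd_imagery data tree.
+ def _example_load_imagery(subject=1):
+     x, y = load_nsd_mental_imagery(subject, mode="imagery", stimtype="all", average=True)
+     return x, y
+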
533
+ def bb_soft_clip_loss(preds, targs, temp=0.125):
534
+ temp = np.exp(temp)
535
+ clip_clip = (targs @ targs.T)/temp
536
+ brain_brain = (preds @ preds.T)/temp
537
+
538
+ # loss1 = -(brain_brain.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
539
+ # loss2 = -(brain_brain.T.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
540
+ # loss = (loss1 + loss2)/2
541
+
542
+ loss = nn.functional.kl_div(brain_brain.log_softmax(-1), clip_clip.softmax(-1), reduction='batchmean')
543
+ return loss #* 1e5
544
+
545
+ def bb_cossim_loss(preds, targs, temp=None):
546
+ clip_clip = (targs @ targs.T)
547
+ brain_brain = (preds @ preds.T)
548
+ loss = 1 - nn.functional.cosine_similarity(brain_brain, clip_clip).mean()
549
+ return loss
550
+
551
+ def load_images_to_numpy(folder_path):
552
+ file_names = [f for f in os.listdir(folder_path) if (f.endswith('.png') or f.endswith('.jpg') or f.endswith('.jpeg'))]
553
+ image_data = []
554
+ image_names = []
555
+ for file_name in file_names:
556
+ image_path = os.path.join(folder_path, file_name)
557
+ image_names.append(file_name)
558
+ with Image.open(image_path) as img:
559
+ img_array = np.array(img)
560
+             if img_array.shape[0] != 224 or img_array.shape[1] != 224:
+                 img = img.resize((224, 224))
+                 img_array = np.array(img)
563
+ image_data.append(img_array)
564
+ images_np = np.stack(image_data, axis=0)
565
+ return images_np, image_names
566
+
567
+
568
+ import hashlib
569
+ def hash_image(image_tensor):
570
+ # Convert tensor to bytes
571
+ image_bytes = image_tensor.detach().cpu().numpy().tobytes()
572
+ # Hash the bytes using SHA-256
573
+ hash_object = hashlib.sha256(image_bytes)
574
+ hex_dig = hash_object.hexdigest()
575
+ return hex_dig
576
+
577
+
578
+ def find_paired_indices(x):
579
+ unique_elements, counts = torch.unique(x, return_counts=True)
580
+ repeated_elements = unique_elements[counts > 1]
581
+ paired_indices = []
582
+
583
+ for element in repeated_elements:
584
+ indices = (x == element).nonzero(as_tuple=True)[0]
585
+ # Instead of creating pairs, just collect the entire set of indices once
586
+         paired_indices.append(indices.tolist())
587
+
588
+ return paired_indices
589
+
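+ # Illustrative example: find_paired_indices groups trial indices that share an image ID,
+ # ordered by the sorted unique repeated values, e.g.
+ #     find_paired_indices(torch.tensor([5, 3, 5, 7, 3, 5]))  ->  [[1, 4], [0, 2, 5]]
+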
590
+
591
+ def zscore(data,train_mean=None,train_std=None):
592
+ # assuming that first dim is num_samples and second dim is num_voxels
593
+ if train_mean is None:
594
+ train_mean = np.mean(data,axis=0)
595
+ if train_std is None:
596
+ train_std = np.std(data,axis=0)
597
+ zscored_data = (data - train_mean) / (train_std + 1e-6)
598
+ return zscored_data
599
+
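+ # --- Illustrative usage sketch (hypothetical train/test arrays) ---
+ # Fit the normalization on training betas only, then reuse those statistics for
+ # held-out data so no test-set information leaks into the scaling.
+ def _example_zscore_split(train_vox, test_vox):
+     train_mean, train_std = np.mean(train_vox, axis=0), np.std(train_vox, axis=0)
+     return zscore(train_vox), zscore(test_vox, train_mean=train_mean, train_std=train_std)
+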
600
+
601
+ def log_io(func): # the first argument must be input; output must be a kwarg for this to work properly
602
+ def wrapper(*args, **kwargs):
603
+ inp = args[0]
604
+ output = kwargs['output']
605
+ print(f'\n*** Loading data from {inp} ***\n')
606
+ result = func(*args, **kwargs)
607
+ print(f'\n*** Saved resampled data to {output} ***\n')
608
+ return result
609
+ return wrapper
610
+
611
+ @log_io
612
+ def resample(inp, ref, target_size, omat, output=None):
613
+ os.system(f"flirt -in {inp} \
614
+ -ref {ref} \
615
+ -applyisoxfm {target_size} -nosearch \
616
+ -omat {omat} \
617
+ -out {output}")
618
+
619
+ @log_io
620
+ def applyxfm(inp, ref, init, interp, output=None):
621
+ os.system(f"flirt -in {inp} \
622
+ -ref {ref} \
623
+ -out {output} \
624
+ -applyxfm -init {init} \
625
+ -interp {interp}")
626
+
627
+ @log_io
628
+ def apply_thresh(inp, thresh, output=None):
629
+ os.system(f"fslmaths {inp} -thr {thresh} -bin {output}")
630
+
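+ # --- Illustrative usage sketch (paths are hypothetical; requires FSL's flirt/fslmaths
+ # on the PATH) ---
+ # The @log_io decorator assumes the input is the first positional argument and the
+ # output path is passed as the `output` keyword, so call the wrappers accordingly.
+ def _example_resample_pipeline(anat, func_ref, out_dir):
+     omat = f"{out_dir}/anat2func.mat"
+     resample(anat, func_ref, 2.0, omat, output=f"{out_dir}/anat_2mm.nii.gz")
+     applyxfm(anat, func_ref, omat, "nearestneighbour", output=f"{out_dir}/anat_in_func.nii.gz")
+     apply_thresh(f"{out_dir}/anat_in_func.nii.gz", 0.5, output=f"{out_dir}/anat_mask.nii.gz")
+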
631
+ def resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat):
632
+ # convert vox to nifti object and save
633
+ orig_mask = nib.load(f"{orig_glmsingle_path}/{sub}_{session}{task_name}_brain.nii.gz")
634
+
635
+ # apply mask and save original betas
636
+ print("original:", vox.shape)
637
+ vox_nii = unmask(vox, orig_mask)
638
+ glm_save_path = f"{glmsingle_path}/vox.nii.gz"
639
+ nib.save(vox_nii, glm_save_path)
640
+ print(f"saved original glmsingle betas to {glm_save_path}")
641
+
642
+ # resample and save betas
643
+ applyxfm(glm_save_path, ref_name, omat, resample_method, output=glm_save_path_resampled)
644
+ vox = nib.load(glm_save_path_resampled)
645
+ print("vox after resampling", vox.shape)
646
+
647
+ return vox
648
+
649
+
650
+ def load_preprocess_betas(glmsingle_path, session, ses_list,
651
+ remove_close_to_MST, image_names,
652
+ remove_random_n, vox_idx):
653
+ glmsingle = np.load(f"{glmsingle_path}/TYPED_FITHRF_GLMDENOISE_RR.npz", allow_pickle=True)
654
+ vox = glmsingle['betasmd'].T
655
+
656
+ print("vox", vox.shape)
657
+
658
+ # Preprocess betas
659
+ if vox.ndim == 4:
660
+ vox = vox[:, 0, 0]
661
+ print("vox", vox.shape)
662
+
663
+ if remove_close_to_MST:
664
+ x = [x for x in image_names if x != 'blank.jpg' and str(x) != 'nan']
665
+ close_to_MST_idx = [y for y, z in enumerate(x) if 'closest_pairs' in z]
666
+ close_to_MST_mask = np.ones(len(vox), dtype=bool)
667
+ close_to_MST_mask[close_to_MST_idx] = False
668
+ vox = vox[close_to_MST_mask]
669
+ print("vox after removing close_to_MST", vox.shape)
670
+
671
+ elif remove_random_n:
672
+ random_n_mask = np.ones(len(vox), dtype=bool)
673
+ random_n_mask[vox_idx] = False
674
+ vox = vox[random_n_mask]
675
+ print(f"vox after removing {n_to_remove}", vox.shape)
676
+
677
+ return vox
678
+
679
+
680
+ def prepare_model_and_training(
681
+ num_voxels_list,
682
+ n_blocks,
683
+ hidden_dim,
684
+ clip_emb_dim,
685
+ clip_seq_dim,
686
+ clip_scale,
687
+ use_prior=False,
688
+ ):
689
+ """
690
+ Prepare MindEye model, optimizer, and learning rate scheduler.
691
+
692
+ Args:
693
+ num_voxels_list (list): List of number of voxels for each subject
694
+ hidden_dim (int): Hidden dimension for model layers
695
+ clip_emb_dim (int): CLIP embedding dimension
696
+ clip_seq_dim (int): CLIP sequence dimension
697
+ use_prior (bool): Whether to include diffusion prior network
698
+
699
+ Returns:
700
+ model
701
+ """
702
+ import torch
703
+ import torch.nn as nn
704
+ import numpy as np
705
+ from models import VersatileDiffusionPriorNetwork, BrainDiffusionPrior
706
+ from MindEye2 import MindEyeModule, RidgeRegression, BrainNetwork
707
+ import utils
708
+
709
+ model = MindEyeModule()
710
+ print(model)
711
+
712
+ model.ridge = RidgeRegression(num_voxels_list, out_features=hidden_dim)
713
+ utils.count_params(model.ridge)
714
+ utils.count_params(model)
715
+
716
+ model.backbone = BrainNetwork(h=hidden_dim, in_dim=hidden_dim, out_dim=clip_emb_dim*clip_seq_dim, seq_len=1, n_blocks=n_blocks,
717
+ clip_size=clip_emb_dim)
718
+ utils.count_params(model.backbone)
719
+ utils.count_params(model)
720
+
721
+ if use_prior:
722
+ # setup diffusion prior network
723
+ out_dim = clip_emb_dim
724
+ depth = 6
725
+ dim_head = 52
726
+ heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim
727
+ timesteps = 100
728
+ prior_network = VersatileDiffusionPriorNetwork(
729
+ dim=out_dim,
730
+ depth=depth,
731
+ dim_head=dim_head,
732
+ heads=heads,
733
+ causal=False,
734
+ num_tokens = clip_seq_dim,
735
+ learned_query_mode="pos_emb"
736
+ )
737
+ model.diffusion_prior = BrainDiffusionPrior(
738
+ net=prior_network,
739
+ image_embed_dim=out_dim,
740
+ condition_on_text_encodings=False,
741
+ timesteps=timesteps,
742
+ cond_drop_prob=0.2,
743
+ image_embed_scale=None,
744
+ )
745
+
746
+ utils.count_params(model.diffusion_prior)
747
+ utils.count_params(model)
748
+
749
+ return model
750
+
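+ # --- Illustrative usage sketch (hyperparameter values are hypothetical placeholders;
+ # note that prior heads are derived as clip_emb_dim // 52, so clip_emb_dim should be a
+ # multiple of 52 when use_prior=True) ---
+ def _example_build_model(num_voxels_list):
+     return prepare_model_and_training(
+         num_voxels_list=num_voxels_list,  # e.g. [15724] for a single subject
+         n_blocks=4,
+         hidden_dim=1024,
+         clip_emb_dim=1664,
+         clip_seq_dim=256,
+         clip_scale=1.0,
+         use_prior=True,
+     )
+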
751
+
752
+ def get_slurm_seed(default=0):
753
+ """Returns SLURM array seed or a default seed if not running in SLURM."""
754
+ try:
755
+ seed = int(os.environ["SLURM_ARRAY_TASK_ID"])
756
+ print(f"Using SLURM job array seed: {seed}")
757
+ except KeyError:
758
+ print(f"SLURM seed not found, using default: {default}")
759
+ seed = default
760
+ return seed
761
+
762
+
763
+ def get_slurm_job():
764
+ """Returns ID of current SLURM job"""
765
+ return int(os.environ["SLURM_ARRAY_JOB_ID"])
766
+
767
+
768
+ def filter_and_average_mst(vox, vox_image_dict):
769
+ """
770
+ Filters and averages repeated MST images while retaining unique images.
771
+
772
+ Args:
773
+ vox (np.ndarray): Original array of shape (num_images, num_features).
774
+ vox_image_dict (dict): Maps image indices to file paths.
775
+ Returns:
776
+ tuple: Filtered array and corresponding kept indices.
777
+ """
778
+ from copy import deepcopy
779
+
780
+ # Identify repeated MST paths
781
+ repeats = {}
782
+ for idx, path in vox_image_dict.items():
783
+ if "MST_pairs" in path:
784
+ repeats.setdefault(path, []).append(idx)
785
+
786
+ # Create mask to track kept entries
787
+ keep_mask = np.ones(vox.shape[0], dtype=bool)
788
+ output_vox = deepcopy(vox).astype(np.float32)
789
+
790
+ # Average repeated MST images
791
+ for indices in repeats.values():
792
+ if len(indices) > 1:
793
+ avg_values = np.mean(vox[indices], axis=0)
794
+ output_vox[indices[0]] = avg_values
795
+ keep_mask[indices[1:]] = False
796
+
797
+ return output_vox[keep_mask], np.where(keep_mask)[0]
798
+
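+ # --- Illustrative usage sketch (hypothetical inputs) ---
+ # Rows whose image path contains "MST_pairs" are averaged into their first occurrence
+ # and the duplicates dropped; every other row passes through unchanged.
+ def _example_filter_mst(vox, image_names):
+     vox_image_dict = {i: name for i, name in enumerate(image_names)}
+     return filter_and_average_mst(vox, vox_image_dict)
+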
799
+
800
+ def verify_image_patterns(image_to_indices):
801
+ failures = []
802
+ for image_name, sessions in image_to_indices.items():
803
+ session1, session2 = sessions
804
+ total_count = len(session1) + len(session2)
805
+
806
+ if "special515" in image_name:
807
+ if not (
808
+ (len(session1) == 3 and len(session2) == 0) or
809
+ (len(session1) == 0 and len(session2) == 3) or
810
+ (len(session1) == 1 and len(session2) == 0) or
811
+ (len(session1) == 0 and len(session2) == 1)
812
+ ):
813
+ failures.append(f"{image_name} does not appear 3x in only 1 session.")
814
+ elif "MST_pairs" in image_name:
815
+ if not (len(session1) == 2 and len(session2) == 2):
816
+ failures.append(f"{image_name} does not appear 2x in both sessions.")
817
+ else:
818
+ if not (
819
+ (total_count == 1) and
820
+ (len(session1) == 1 and len(session2) == 0 or len(session1) == 0 and len(session2) == 1)
821
+ ):
822
+ failures.append(f"{image_name} does not appear 1x in only 1 session.")
823
+
824
+ return failures
825
+
826
+ def compute_avg_repeat_corrs(vox_repeats: np.ndarray) -> np.ndarray:
+     """
+     Given an array of shape (n_images, n_repeats, n_voxels), compute each voxel's
+     reliability as the across-image correlation between every unique pair of repeats,
+     averaged over those repeat pairs.
+     Returns:
+         rels: (n_voxels,) array of averaged correlations
+     """
+     import itertools
+     n_images, n_repeats, n_vox = vox_repeats.shape
+     combos = list(itertools.combinations(range(n_repeats), 2))
+ 
+     rels = np.full(n_vox, np.nan)
+ 
+     # For each voxel
+     for v in range(n_vox):
+         corrs = []
+         # Correlate the two repeats across images for this voxel
+         for i, j in combos:
+             r = np.corrcoef(vox_repeats[:, i, v], vox_repeats[:, j, v])[0, 1]
+             corrs.append(r)
+         # Average across all pairwise correlations
+         rels[v] = np.mean(corrs)
+ 
+     return rels
850
+
851
+
852
+ def get_pairs(data, repeat_indices=(0, 1)):
853
+ """
854
+ Extract pairs based on specified repeat indices, falling back to available repeats.
855
+
856
+ Parameters:
857
+ - data: List of items, where each item may have different number of repeats
858
+ - repeat_indices: Tuple of indices (i, j) to extract if available
859
+
860
+ Returns:
861
+ - Array of pairs
862
+ """
863
+ result = []
864
+
865
+ for item in data:
866
+ # Determine what repeats are actually available
867
+ num_repeats = len(item)
868
+
869
+ # Handle the requested indices
870
+ i, j = repeat_indices
871
+
872
+ # Adjust indices if they're out of bounds
873
+ if i >= num_repeats:
874
+ i = min(num_repeats - 1, 0)
875
+ if j >= num_repeats:
876
+ j = min(num_repeats - 1, 1 if num_repeats > 1 else 0)
877
+
878
+ # Create the pair
879
+ result.append([item[i], item[j]])
880
+
881
+ return np.array(result)
882
+
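+ # Illustrative example: get_pairs pulls the requested repeat indices from each item,
+ # clamping to whatever repeats actually exist, e.g. with repeat_indices=(0, 2):
+ #     get_pairs([[10, 11, 12], [20, 21]], repeat_indices=(0, 2))
+ #     ->  array([[10, 12], [20, 21]])
+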
883
+
884
+ def compute_vox_rels(vox, pairs, sub, session, rdm=False, repeat_indices=(0,1)):
885
+ from tqdm import tqdm
886
+ pairs = get_pairs(pairs, repeat_indices=repeat_indices)
887
+ # print(pairs)
888
+ # _tmp = [(i[0],i[-1]) for i in pairs]
889
+ # breakpoint()
890
+ # vox_pairs = zscore(vox[_tmp]) # zscoring based on first and last repeat only
891
+ # rels = compute_avg_repeat_corrs(vox_pairs)
892
+
893
+ # _tmp = [(i[0],i[1]) for i in pairs]
894
+ # vox_pairs = zscore(vox[_tmp])
895
+
896
+ vox_pairs = zscore(vox[pairs])
897
+ rels = np.full(vox.shape[-1], np.nan)
898
+ for v in tqdm(range(vox.shape[-1])):
899
+ rels[v] = np.corrcoef(vox_pairs[:, 0, v], vox_pairs[:, 1, v])[1, 0]
900
+
901
+ print("rels", rels.shape)
902
+ assert np.sum(np.all(np.isnan(rels))) == 0
903
+
904
+ if rdm: # generate a Representational Dissimilarity Matrix to visualize how similar the voxel patterns are across images
905
+ # average voxel patterns across repeats
906
+ vox0 = np.zeros((len(pairs), vox.shape[-1], 2))
907
+ for ipair, pair in enumerate(tqdm(pairs)):
908
+ i, j = pair[:2] # Using the first two repeats
909
+ vox0[ipair, :, :] = vox[pair].T
910
+ vox_avg = vox0.mean(-1)
911
+
912
+ # plot the RDM at various thresholds
913
+ r_thresholds = np.array([.0, .1, .2, .3])
914
+ rdm = np.zeros((len(r_thresholds), len(pairs), len(pairs)))
915
+
916
+ for ir_thresh, r_thresh in enumerate(r_thresholds):
917
+ print(f"reliability threshold = {r_thresh}")
918
+ for i in tqdm(range(len(pairs))):
919
+ for j in range(len(pairs)):
920
+ rdm[ir_thresh, i, j] = np.corrcoef(vox_avg[i, rels > r_thresh],
921
+ vox_avg[j, rels > r_thresh])[0, 1]
922
+ n_thresh = len(r_thresholds)
923
+ fig, axs = plt.subplots(1, n_thresh, figsize=(4 * n_thresh, 4), squeeze=False)
924
+
925
+ for i, r_thresh in enumerate(r_thresholds):
926
+ ax = axs[0, i]
927
+ im = ax.imshow(rdm[i], clim=(-1, 1))
928
+ ax.set_title(f"r > {r_thresh:.1f}")
929
+ ax.set_xlabel("Image")
930
+ ax.set_ylabel("Image")
931
+ fig.colorbar(im, ax=ax, shrink=0.8)
932
+
933
+ # Optional: add a supertitle with subject/session/repeat info
934
+ fig.suptitle(f"{sub}_{session}\nrepeat combo {r}", fontsize=14)
935
+ plt.tight_layout(rect=[0, 0.03, 1, 0.95]) # Leave space for suptitle
936
+ plt.show()
937
+
938
+ # thresh = .2
939
+ # plt.figure(figsize=(4, 4))
940
+ # plt.imshow(rdm[np.where(r_thresholds == thresh)[0].item()], clim=(-1, 1))
941
+ # plt.colorbar(shrink=0.8)
942
+ # plt.title(f"{sub}_{session}\nreliability threshold={thresh}; repeats {r}")
943
+ # plt.show()
944
+
945
+ for thresh in range(rdm.shape[0]):
946
+ for img in range(rdm.shape[1]):
947
+ assert np.isclose(rdm[thresh, img, img], 1)
948
+
949
+ return rels
950
+
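+ # --- Illustrative usage sketch (hypothetical inputs) ---
+ # `pairs` comes from find_paired_indices on the per-trial image indices; reliability is
+ # the across-image correlation between two repeats, computed voxel by voxel.
+ def _example_voxel_reliability(vox, image_idx, sub, session):
+     pairs = find_paired_indices(image_idx)
+     return compute_vox_rels(vox, pairs, sub, session, rdm=False, repeat_indices=(0, 1))
+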
951
+
952
+ def load_masks(img_list):
953
+ from nilearn.masking import intersect_masks
954
+ import nilearn
955
+
956
+ masks = [nilearn.image.load_img(mask) for mask in img_list]
957
+ assert all(np.allclose(masks[0].affine, m.affine) for m in masks)
958
+ return masks, intersect_masks(masks, threshold=0.5, connected=True)
959
+
960
+
961
+ def get_mask(ses_list, sub, func_task_name):
962
+ assert isinstance(ses_list, list), "ses_list is not a list"
963
+ mask_imgs = []
964
+ nsd_imgs = []
965
+ for ses in ses_list:
966
+ prefix = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{ses}_task-{func_task_name}/{sub}_{ses}_task-{func_task_name}"
967
+ mask_path = prefix + "_brain.nii.gz"
968
+ nsd_path = prefix + "_nsdgeneral.nii.gz"
969
+ print(mask_path)
970
+ print(nsd_path)
971
+ assert os.path.exists(mask_path)
972
+ assert os.path.exists(nsd_path)
973
+ mask_imgs.append(mask_path)
974
+ nsd_imgs.append(nsd_path)
975
+
976
+ func_masks, avg_mask = load_masks(mask_imgs)
977
+ print(f'intersected brain masks from {ses_list}')
978
+
979
+ nsd_masks, roi = load_masks(nsd_imgs)
980
+ print(f'intersected nsdgeneral roi masks from {ses_list}')
981
+
982
+ return func_masks, avg_mask, nsd_masks, roi
983
+
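+ # --- Illustrative usage sketch (subject/session/task labels are hypothetical; the
+ # GLMsingle output paths inside get_mask are hard-coded to the /scratch layout above) ---
+ def _example_union_mask():
+     func_masks, avg_mask, nsd_masks, roi = get_mask(["ses-01", "ses-02"], "sub-005", "study")
+     return avg_mask, roi
+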
984
+
985
+
986
+ def process_images(image_names, unique_images, remove_close_to_MST=False, remove_random_n=False, imgs_to_remove=None, sub=None, session=None):
987
+ image_idx = np.array([])
988
+ vox_image_names = np.array([])
989
+ all_MST_images = {}
990
+
991
+ for i, im in enumerate(image_names):
992
+ if im == "blank.jpg" or str(im) == "nan":
993
+ continue
994
+
995
+ if remove_close_to_MST and "closest_pairs" in im:
996
+ continue
997
+
998
+ if remove_random_n and im in imgs_to_remove:
999
+ continue
1000
+
1001
+ vox_image_names = np.append(vox_image_names, im)
1002
+ image_idx_ = np.where(im == unique_images)[0].item()
1003
+ image_idx = np.append(image_idx, image_idx_)
1004
+
1005
+ if sub == 'ses-01' and session in ('ses-01', 'ses-04'):
1006
+ if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im)
1007
+ or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)):
1008
+ all_MST_images[i] = im
1009
+ elif 'MST' in im:
1010
+ all_MST_images[i] = im
1011
+
1012
+ image_idx = torch.Tensor(image_idx).long()
1013
+ unique_MST_images = np.unique(list(all_MST_images.values()))
1014
+
1015
+ MST_ID = np.array([], dtype=int)
1016
+ if remove_close_to_MST:
1017
+ close_to_MST_idx = np.array([], dtype=int)
1018
+ if remove_random_n:
1019
+ random_n_idx = np.array([], dtype=int)
1020
+
1021
+ vox_idx = np.array([], dtype=int)
1022
+ j = 0 # Counter for indexing vox based on removed images
1023
+
1024
+ for i, im in enumerate(image_names):
1025
+ if im == "blank.jpg" or str(im) == "nan":
1026
+ continue
1027
+
1028
+ if remove_close_to_MST and "closest_pairs" in im:
1029
+ close_to_MST_idx = np.append(close_to_MST_idx, i)
1030
+ continue
1031
+
1032
+ if remove_random_n and im in imgs_to_remove:
1033
+ vox_idx = np.append(vox_idx, j)
1034
+ j += 1
1035
+ continue
1036
+
1037
+ j += 1
1038
+ curr = np.where(im == unique_MST_images)
1039
+
1040
+ if curr[0].size == 0:
1041
+ MST_ID = np.append(MST_ID, len(unique_MST_images)) # Out of range index for filtering later
1042
+ else:
1043
+ MST_ID = np.append(MST_ID, curr)
1044
+
1045
+ assert len(MST_ID) == len(image_idx)
1046
+
1047
+ pairs = find_paired_indices(image_idx)
1048
+ pairs = sorted(pairs, key=lambda x: x[0])
1049
+
1050
+ return image_idx, vox_image_names, pairs
1051
+
1052
+ def find_all_indices(list_, element):
1053
+ return [index for index, value in enumerate(list_) if value == element]
1054
+
1055
+
1056
+ # ENIGMA borrowed code
1057
+
1058
+ from tqdm import tqdm
1059
+ import open_clip
1060
+
1061
+ class CLIPEncoder:
1062
+ def __init__(
1063
+ self,
1064
+ model_name="ViT-H-14",
1065
+ pretrained="laion2b_s32b_b79k",
1066
+ precision="fp32",
1067
+ batch_size: int = 20,
1068
+ device="cuda",
1069
+ **kwargs,
1070
+ ):
1071
+ self.batch_size = batch_size
1072
+ self.model, self.preprocess, _ = open_clip.create_model_and_transforms(
1073
+ model_name, pretrained, precision, device=device, **kwargs
1074
+ )
1075
+ self.tokenizer = open_clip.get_tokenizer(model_name)
1076
+ self.device = device
1077
+
1078
+ def encode_text(self, text, normalize=False):
1079
+ features = []
1080
+ for i in tqdm(
1081
+ range(0, len(text), self.batch_size), desc="CLIP Encoding text..."
1082
+ ):
1083
+ batch_text = text[i : min(i + self.batch_size, len(text))]
1084
+ inputs = self.tokenizer(batch_text).to(self.device)
1085
+ with torch.no_grad():
1086
+ batch_features = self.model.encode_text(inputs)
1087
+ if normalize:
1088
+ batch_features = F.normalize(batch_features, dim=-1)
1089
+ features.append(batch_features)
1090
+ features = torch.cat(features, dim=0)
1091
+ return features.detach().cpu()
1092
+
1093
+ def encode_image(self, image, verbose = False):
1094
+ if isinstance(image, Image.Image):
1095
+ image = [image]
1096
+ elif isinstance(image, torch.Tensor):
1097
+ if image.ndim == 3:
1098
+ image = [image]
1099
+ elif image.ndim != 4:
1100
+ raise ValueError("Invalid tensor shape for image encoding.")
1101
+
1102
+ elif isinstance(image, list) and all(
1103
+ isinstance(img, Image.Image) for img in image
1104
+ ):
1105
+ image = [self.preprocess(img.convert("RGB")) for img in image]
1106
+ elif isinstance(image, list) and all(
1107
+ isinstance(img, torch.Tensor) for img in image
1108
+ ):
1109
+ image = [
1110
+ img.unsqueeze(0) if img.ndim == 3 else img for img in image
1111
+ ]
1112
+ elif isinstance(image, list) and all(
1113
+ isinstance(img, str) for img in image
1114
+ ):
1115
+ print("Preprocessing images...")
1116
+ preprocessed_image = []
1117
+ for i in tqdm(
1118
+ range(0, len(image)),
1119
+ desc="CLIP Preprocessing images...",
1120
+ ):
1121
+ preprocessed_image.append(
1122
+ self.preprocess(Image.open(image[i]).convert("RGB"))
1123
+ )
1124
+ image = preprocessed_image
1125
+ else:
1126
+ raise ValueError("Unsupported image type for encoding.")
1127
+
1128
+ features = []
1129
+ if verbose:
1130
+ iterator = tqdm(
1131
+ range(0, len(image), self.batch_size),
1132
+ desc="CLIP Encoding images...",
1133
+ )
1134
+ else:
1135
+ iterator = range(0, len(image), self.batch_size)
1136
+
1137
+ for i in iterator:
1138
+ batch_images = image[i : min(i + self.batch_size, len(image))]
1139
+ if isinstance(batch_images, list):
1140
+ batch_images = torch.stack(batch_images)
1141
+ with torch.no_grad():
1142
+ batch_features = self.model.encode_image(
1143
+ batch_images.to(self.device)
1144
+ )
1145
+ features.append(batch_features)
1146
+
1147
+ features = torch.cat(features, dim=0)
1148
+ return features.detach()
1149
+
1150
+ def __call__(self, image):
1151
+ return self.encode_image(image).unsqueeze(1) # patch to make it compatible with old ME2 embedder
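+ 
+ # --- Illustrative usage sketch (weights download on first use; device "cuda" assumed) ---
+ def _example_clip_encode(image_paths):
+     encoder = CLIPEncoder(model_name="ViT-H-14", pretrained="laion2b_s32b_b79k", batch_size=20)
+     image_embs = encoder.encode_image(image_paths)  # list of file paths -> [N, 1024]
+     text_embs = encoder.encode_text(["a photo of a dog"], normalize=True)
+     return image_embs, text_embs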