#!/usr/bin/env python # coding: utf-8 # # Import packages & functions # In[1]: print("importing modules") import os import sys import json import argparse import numpy as np import time import random import string import h5py from tqdm import tqdm import webdataset as wds from PIL import Image import pandas as pd import nibabel as nib import nilearn import matplotlib.pyplot as plt import torch import torch.nn as nn from torchvision import transforms # tf32 data type is faster than standard float32 torch.backends.cuda.matmul.allow_tf32 = True import utils from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas # imports utils from mindeye_preproc as "preproc" import importlib.util parent_utils_path = "/home/ri4541/mindeye_preproc/analysis/utils.py" spec = importlib.util.spec_from_file_location("utils", parent_utils_path) preproc = importlib.util.module_from_spec(spec) parent_dir = os.path.dirname(parent_utils_path) if parent_dir not in sys.path: sys.path.append(parent_dir) spec.loader.exec_module(preproc) if utils.is_interactive(): from IPython.display import clear_output # function to clear print outputs in cell get_ipython().run_line_magic('load_ext', 'autoreload') # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions get_ipython().run_line_magic('autoreload', '2') seed = utils.get_slurm_seed() # # Princeton data prep # ## Load Data & Design # In[2]: if utils.is_interactive(): sub = "sub-005" session = "all" task = 'C' # 'study' or 'A'; used to search for functional run in bids format func_task_name = 'C' else: sub = os.environ["sub"] session = os.environ["session"] task = os.environ["task"] func_task_name = 'C' if session == "all": ses_list = ["ses-01", "ses-02", "ses-03"] # list of actual session IDs design_ses_list = ["ses-01", "ses-02", "ses-03"] # list of session IDs to search for design matrix else: ses_list = [session] design_ses_list = [session] task_name = f"_task-{task}" if task != 'study' else '' resample_voxel_size = False resample_post_glmsingle = False # do you want to do voxel resampling here? if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal load_from_resampled_file = False # do you want to load resampled data from file? 
if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data train_test_split = 'MST' # 'MST', 'orig', 'unique' remove_close_to_MST = False remove_random_n = False if remove_close_to_MST or remove_random_n: assert remove_close_to_MST != remove_random_n # don't remove both sets of images n_to_remove = 0 if remove_random_n: assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set n_to_remove = 150 if resample_voxel_size: # voxel size was unchanged in glmsingle, want to perform resampling here resampled_vox_size = 2.5 resample_method = "sinc" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html # file name helper variables vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore resampled_suffix = f"resampled_{vox_dim_str}mm_{resample_method}" mask_resampled_suffix = resampled_suffix if resample_post_glmsingle: resampled_suffix += '_postglmsingle' else: resampled_suffix += '_preglmsingle' # In[3]: session_label = preproc.get_session_label(ses_list) print('session label:', session_label) n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list) # In[4]: if utils.is_interactive(): glmsingle_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}" else: glmsingle_path = os.environ["glmsingle_path"] designdir = "/home/ri4541/real_time_mindEye2" print(glmsingle_path) if resample_voxel_size: # option 1: we are using original (non-resampled) GLMsingle outputs and doing the resampling here # option 2: doing resampling pre-GLMsingle and using those outputs; no resampling involved here if resample_post_glmsingle: # option 1 orig_glmsingle_path = glmsingle_path glmsingle_path += f"_{resampled_suffix}" print("resampled glmsingle path:", glmsingle_path) if load_from_resampled_file: # resampling is already done; load from file assert os.path.exists(glmsingle_path) # the new directory must have been created if we reached here else: # don't load from file; do resampling here os.makedirs(glmsingle_path,exist_ok=True) else: # option 2 glmsingle_path += f"_{resampled_suffix}" print("glmsingle path:", glmsingle_path) assert os.path.exists(glmsingle_path) print("glmsingle path exists!") # In[5]: data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files( sub=sub, session=session, func_task_name=task, designdir=designdir, design_ses_list=design_ses_list ) if sub == 'sub-001': if session == 'ses-01': assert image_names[0] == 'images/image_686_seed_1.png' elif session in ('ses-02', 'all'): assert image_names[0] == 'all_stimuli/special515/special_40840.jpg' elif session == 'ses-03': assert image_names[0] == 'all_stimuli/special515/special_69839.jpg' elif session == 'ses-04': assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png' elif sub == 'sub-003': assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png' unique_images = np.unique(image_names.astype(str)) unique_images = unique_images[(unique_images!="nan")] len_unique_images = len(unique_images) print("n_runs",n_runs) if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): assert len(unique_images) == 851 print(image_names[:4]) print(starts[:4]) print(is_new_run[:4]) if remove_random_n: # want to remove 150 imgs # 100 special515 imgs 
are repeated 3x (300 total) # all other train imgs are only shown once (558 total) # of the 150, want to sample proportionally since we're cutting all repeats for special515 # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total np.random.seed(seed) options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list) options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated) imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False) imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False)) image_idx = np.array([]) # contains the unique index of each presented image vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx all_MST_images = dict() for i, im in enumerate(image_names): # skip if blank, nan if im == "blank.jpg": i+=1 continue if str(im) == "nan": i+=1 continue vox_image_names = np.append(vox_image_names, im) if remove_close_to_MST: # optionally skip close_to_MST images if "closest_pairs" in im: i+=1 continue elif remove_random_n: if im in imgs_to_remove: i+=1 continue image_idx_ = np.where(im==unique_images)[0].item() image_idx = np.append(image_idx, image_idx_) if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles import re if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im) or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)): # the regexp here looks for **_***.png, allows 1-2 chars before underscore and 1-3 chars after it # print(im) all_MST_images[i] = im i+=1 elif 'MST' in im: all_MST_images[i] = im i+=1 image_idx = torch.Tensor(image_idx).long() # for im in new_image_names[MST_images]: # assert 'MST_pairs' in im # assert len(all_MST_images) == 300 unique_MST_images = np.unique(list(all_MST_images.values())) MST_ID = np.array([], dtype=int) if remove_close_to_MST: close_to_MST_idx = np.array([], dtype=int) if remove_random_n: random_n_idx = np.array([], dtype=int) vox_idx = np.array([], dtype=int) j=0 # this is a counter keeping track of the remove_random_n used later to index vox based on the removed images; unused otherwise for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure # skip if blank, nan if im == "blank.jpg": i+=1 continue if str(im) == "nan": i+=1 continue if remove_close_to_MST: # optionally skip close_to_MST images if "closest_pairs" in im: close_to_MST_idx = np.append(close_to_MST_idx, i) i+=1 continue if remove_random_n: if im in imgs_to_remove: vox_idx = np.append(vox_idx, j) i+=1 j+=1 continue j+=1 curr = np.where(im == unique_MST_images) # print(curr) if curr[0].size == 0: MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later else: MST_ID = np.append(MST_ID, curr) assert len(MST_ID) == len(image_idx) # assert 
len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data) # MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor print(MST_ID.shape) if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): assert len(all_MST_images) == 100 # ## Load images # In[ ]: import imageio.v2 as imageio resize_transform = transforms.Resize((224, 224)) MST_images = [] images = None for im_name in tqdm(image_idx): if sub == 'sub-001' and session == 'ses-01': image_file = f"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}" else: image_file = f"{unique_images[im_name]}" im = imageio.imread(image_file) im = torch.Tensor(im / 255).permute(2,0,1) im = resize_transform(im.unsqueeze(0)) if images is None: images = im else: images = torch.vstack((images, im)) if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\d{1,2}_\d{1,3}\.png$', image_file)): MST_images.append(True) else: MST_images.append(False) else: if ("MST_pairs" in image_file): # ("_seed_" not in unique_images[im_name]) and (unique_images[im_name] != "blank.jpg") MST_images.append(True) else: MST_images.append(False) print("images", images.shape) MST_images = np.array(MST_images) print("MST_images", len(MST_images)) if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): assert len(MST_images[MST_images==True]) == 100 print("MST_images==True", len(MST_images[MST_images==True])) # In[ ]: # want IDs of pairmates based on MST_images # create "MST_pairmates" which is a 25x2 array with indices of the 25 pairs based on MST_images == True assert unique_MST_images.shape[0] % 2 == 0 # make sure it's divisible by 2 MST_pairmate_names = unique_MST_images.reshape(int(unique_MST_images.shape[0]/2),2) # print(MST_pairmate_names) MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int) for p, pair in enumerate(MST_pairmate_names): for i, im in enumerate(pair): MST_pairmate_indices[p][i] = np.where(np.isin(list(all_MST_images.values()), im))[0][0] # just take the first repeated instance of an image print(MST_pairmate_indices.shape, MST_pairmate_indices) # In[ ]: if (sub == 'sub-001' and session in ('ses-02', 'ses-03', 'all')): # MST_pairs contains the indices of repeats based on all_MST_images # all_MST_images contains the indices of images from image_names MST_pairs = utils.find_paired_indices(torch.tensor(MST_ID)) MST_pairs = np.array(sorted(MST_pairs[:-1], key=lambda x: x[0])) # we added a fake value as a placeholder so index out the last group of pairs # assert images[MST_pairs] fig, ax = plt.subplots(1, 3, figsize=(10,4)) fig.suptitle('Sample MST pairs') ax[0].imshow(images[MST_pairs[-1][0]].permute(1,2,0).numpy()) ax[0].set_title(f"Trial 0") ax[1].imshow(images[MST_pairs[-1][1]].permute(1,2,0).numpy()) ax[1].set_title(f"Trial 1") ax[2].imshow(images[MST_pairs[-1][2]].permute(1,2,0).numpy()) ax[2].set_title(f"Trial 2") plt.setp(ax, xticks=[], yticks=[]) plt.tight_layout() plt.show() # In[ ]: # pairs has the indices of all repeated images pairs = utils.find_paired_indices(image_idx) pairs = sorted(pairs, key=lambda x: x[0]) fig, axes = plt.subplots(1, 
3, figsize=(6, 2)) # 1 row, 3 columns for i, ax in enumerate(axes): ax.imshow(images[i].permute(1, 2, 0).numpy()) ax.set_title(f"Trial {i}") ax.axis("off") # Hide axes for better visualization plt.tight_layout() # output_path = os.path.join(output_dir, "trials_plot.png") # plt.savefig(output_path, dpi=300) # Save figure plt.show() # In[ ]: p=0 # plot 2 repeats (anything in pairs should have 2 repeats, even if there's more) fig, ax = plt.subplots(1, 2, figsize=(10,8)) ax[0].imshow(images[pairs[p][0]].permute(1,2,0).numpy()) ax[0].set_title(f"Repeat 1") ax[1].imshow(images[pairs[p][1]].permute(1,2,0).numpy()) ax[1].set_title(f"Repeat 2") plt.setp(ax, xticks=[], yticks=[]) plt.tight_layout() plt.show() # In[ ]: def get_image_pairs(sub, session, func_task_name, designdir): """Loads design files and processes image pairs for a given session.""" _, _, _, _, image_names, unique_images, _ = preproc.load_design_files( sub=sub, session=session, func_task_name=func_task_name, designdir=designdir, design_ses_list=[session] # Ensure it's a list ) return utils.process_images(image_names, unique_images) # In[ ]: from collections import defaultdict all_dicts = [] for s_idx, s in enumerate(ses_list): im, vo, _ = get_image_pairs(sub, s, func_task_name, designdir) assert len(im) == len(vo) all_dicts.append({k:v for k,v in enumerate(vo)}) # for the train set (ses-01-02 non-MST) image_to_indices = defaultdict(lambda: [[] for _ in range(len(ses_list))]) for ses_idx, idx_to_name in enumerate(all_dicts): for idx, name in idx_to_name.items(): image_to_indices[name][ses_idx].append(idx) image_to_indices = dict(image_to_indices) # for the test set (ses-03) # test_image_to_indices = defaultdict(lambda: [[] for _ in range(len([ses_list[-1]]))]) # for ses_idx, idx_to_name in enumerate([all_dicts[-1]]): # for idx, name in idx_to_name.items(): # test_image_to_indices[name][ses_idx].append(idx) # test_image_to_indices = dict(test_image_to_indices) if sub == 'sub-005' and len(ses_list) > 1: session_length = 693 for image, session_indices_list in image_to_indices.items(): new_indices_list = [] for idx, indices in enumerate(session_indices_list): offset = idx * session_length new_indices = [i + offset for i in indices] new_indices_list.append(new_indices) image_to_indices[image] = new_indices_list import itertools assert max(itertools.chain.from_iterable(list(image_to_indices.values())))[0] == (len(ses_list)*session_length) - 1 # In[ ]: if resample_voxel_size: from nilearn.masking import apply_mask, unmask ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz' omat_name = f'{glmsingle_path}/boldref_omat' # In[ ]: from nilearn.plotting import plot_roi print('loading brain mask') avg_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_brain.nii.gz') final_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_mask.nii.gz') # mask info dimsize=avg_mask.header.get_zooms() affine_mat = avg_mask.affine brain=avg_mask.get_fdata() xyz=brain.shape #xyz dimensionality of brain mask and epi data print('Mask dimensions:', dimsize) print('') print('Affine:') print(affine_mat) print('') print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n') plot_roi(final_mask, bg_img=avg_mask) plt.show() # In[ ]: # # create union of ses-01 and ses-02 reliability masks and plot against avg_mask # rel_masks = [] # 
rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-01_to_ses-03.npy')) # rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-02_to_ses-03.npy')) # rel_masks = np.array(rel_masks) # for r in rel_masks: # assert r.shape[0] == int(final_mask.get_fdata().sum()) # assert r.dtype == bool # assert len(rel_masks) == 2 # should be the case if there's 2 training sessions # union_mask = np.logical_or(rel_masks[0], rel_masks[1]) # assert union_mask.sum() > rel_masks[0].sum() # assert union_mask.sum() > rel_masks[1].sum() # print(f'there are {union_mask.sum()} reliable voxels based on the union mask out of {int(final_mask.get_fdata().sum())} voxels in the nsdgeneral roi') # print(f'{(union_mask.sum() / int(final_mask.get_fdata().sum())):.2%} of the voxels in the roi were selected') # path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy' path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_ses-01-02.npy' # np.save(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy', union_mask) # print(f'saved union mask to {path}!') union_mask = np.load(path) # ## Load GLMSingle voxel data # In[ ]: ses_mask = [] for s in ses_list: ses_mask_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C/sub-005_{s}_task-C_brain.nii.gz' ses_mask.append(nib.load(ses_mask_path)) assert np.all(ses_mask[-1].affine == final_mask.affine) assert np.all(ses_mask[-1].shape == final_mask.shape) # In[ ]: ses_vox = [] vox = None needs_postprocessing = False params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx) if resample_post_glmsingle == True: glm_save_path_resampled = f"{glmsingle_path}/vox_resampled.nii.gz" if load_from_resampled_file == True: # resampling was done in this notebook so we can load from file vox = nib.load(glm_save_path_resampled) else: # do resampling here assert os.path.exists(ref_name) and os.path.exists(omat_name), "need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs" vox = load_preprocess_betas(orig_glmsingle_path, *params) vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name) needs_postprocessing = True if vox is None: for i, s in enumerate(ses_list): # either resampling was done in glmsingle or we aren't resampling ses_vox_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C' assert os.path.exists(ses_vox_path) ses_vox.append(load_preprocess_betas(ses_vox_path, *params)) v = nilearn.masking.unmask(ses_vox[i], ses_mask[i]) ses_vox[i] = nilearn.masking.apply_mask(v, final_mask) vox = np.concatenate(ses_vox) print("applied final brain mask") print(vox.shape) vox = vox[:, union_mask] print("applied union roi mask") print(vox.shape) if needs_postprocessing == True: vox = apply_mask(vox, avg_mask) vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images) print(vox.shape) assert len(vox) == len(image_idx) # In[ ]: # # get vox into the same shape as the union mask # v = nilearn.masking.unmask(vox, ses_mask) # move back to 3D based on own session mask # final_mask = 
nilearn.masking.intersect_masks([avg_mask, roi]) # vox = nilearn.masking.apply_mask(vox, final_mask) # re-flatten based on final mask so everything is in the same shape now # print(vox.shape) # In[ ]: pairs_homog = np.array([[p[0], p[1]] for p in pairs]) # In[ ]: same_corrs = [] diff_corrs = [] for isamp, samp in enumerate(vox[pairs_homog]): avg_same_img = [] for i in range(samp.shape[0]): for j in range(i, samp.shape[0]): if i != j: avg_same_img.append(np.array([np.corrcoef(samp[i, :], samp[j, :])[0,1]])) same_corrs.append(np.mean(avg_same_img)) avg_diff_img = [] for isamp_j, samp_j in enumerate(vox[pairs_homog]): if isamp_j != isamp: for i in range(samp_j.shape[0]): for j in range(i, samp_j.shape[0]): if i != j: avg_diff_img.append(np.array([np.corrcoef(samp[i, :], samp_j[j, :])[0,1]])) # print(len(avg_diff_img)) diff_corrs.append(np.mean(avg_diff_img)) print(len(same_corrs), len(diff_corrs)) same_corrs = np.array(same_corrs) diff_corrs = np.array(diff_corrs) plt.figure(figsize=(5,4)) plt.title(f"{sub}_{session} same/diff Pearson corr.") plt.plot(np.sort(same_corrs),c='blue',label='same') plt.plot(np.sort(diff_corrs),c='cyan',label='diff') plt.axhline(0,c='k',ls='--') plt.legend() plt.xlabel("sample") plt.ylabel("Pearson R") plt.show() # In[ ]: vox_pairs = utils.zscore(vox[pairs_homog]) plt.figure(figsize=(5,4)) plt.title(f"{sub}_{session} same minus diff difference Pearson corr.") plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference') plt.axhline(0,c='k',ls='--') plt.legend() plt.xlabel("sample") plt.ylabel("Pearson R") plt.show() # # Training MindEye # In[ ]: utils.seed_everything(seed) if train_test_split == 'orig': # train = all images except images that were repeated # test = average of the same-image presentations imageTrain = np.arange(len(images)) train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()]) test_image_indices = pairs print(len(train_image_indices), len(test_image_indices)) assert len(train_image_indices) + len(test_image_indices) == len(image_idx) elif train_test_split == 'MST': # non-MST images are the train split # MST images are the test split MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k]) non_MST_idx = [v for k,v in image_to_indices.items() if 'MST_pairs' not in k] non_MST_idx = np.array([z for y in non_MST_idx for x in y for z in x]) # flatten the indices train_image_indices = non_MST_idx test_image_indices = MST_idx.flatten() # MST_idx contains the mapping for the different test sets; test_image_indices has all MST indices combined print(len(train_image_indices), len(test_image_indices)) assert len(train_image_indices) + len(test_image_indices) == len(vox) elif train_test_split == 'unique': imageTest = np.arange(len(images)) train_image_indices = pairs.flatten() test_image_indices = np.array([item for item in imageTest if item not in pairs.flatten()]) print(len(train_image_indices), len(test_image_indices)) assert len(train_image_indices) + len(test_image_indices) == len(image_idx) else: raise Exception("invalid train_test_split") # TODO add assertion that verifies file names in train and test don't overlap, guards against repeats for i in train_image_indices: assert i not in test_image_indices # In[ ]: ses_split = vox[train_image_indices].shape[0] // 2 train_mean_s1 = np.mean(vox[train_image_indices][:ses_split], axis=0) train_std_s1 = np.std(vox[train_image_indices][:ses_split], axis=0) train_mean_s2 = np.mean(vox[train_image_indices][ses_split:], axis=0) 
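# A minimal sketch (not in the original pipeline) addressing the TODO in the
# train/test split cell above: check that no image *file name* is shared between
# the train and test splits. Assumes `vox_image_names` (built while parsing the
# design files) is row-aligned with `vox`/`image_idx`, which holds when
# remove_close_to_MST and remove_random_n are both False.
_train_names = set(np.asarray(vox_image_names)[np.asarray(train_image_indices).reshape(-1)])
_test_names = set(np.asarray(vox_image_names)[np.asarray(test_image_indices).reshape(-1)])
assert not (_train_names & _test_names), f"train/test splits share images: {sorted(_train_names & _test_names)[:5]}"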
train_std_s2 = np.std(vox[train_image_indices][ses_split:], axis=0) vox[:ses_split] = utils.zscore(vox[:ses_split],train_mean=train_mean_s1,train_std=train_std_s1) vox[ses_split:] = utils.zscore(vox[ses_split:],train_mean=train_mean_s2,train_std=train_std_s2) print("voxels have been zscored") print("ses-01:", vox[:ses_split,0].mean(), vox[:ses_split,0].std()) print("ses-02:", vox[ses_split:,0].mean(), vox[ses_split:,0].std()) print("vox", vox.shape) # In[ ]: # save the mean and std from ses-01 and 02 train_test_mean_s1 = np.mean(vox[:ses_split], axis=0) train_test_std_s1 = np.std(vox[:ses_split], axis=0) train_test_mean_s2 = np.mean(vox[ses_split:], axis=0) train_test_std_s2 = np.std(vox[ses_split:], axis=0) print(train_test_mean_s1.shape) assert np.all(train_test_mean_s1.shape == train_test_std_s1.shape) assert np.all(train_test_mean_s1.shape == train_test_mean_s2.shape) assert np.all(train_test_mean_s1.shape == train_test_std_s2.shape) # In[ ]: # for idx in deleted_indices: # # check image names to be deleted match # original_name = vox_image_dict[idx] # matching_indices = [i for i in deleted_indices if vox_image_dict[i] == original_name] # assert all(vox_image_dict[i] == original_name for i in matching_indices), \ # f"Mismatch in image names for deleted indices {matching_indices}" # # check image data to be deleted match # base_image = images[matching_indices[0]] # Reference image # for i in matching_indices[1:]: # assert np.array_equal(base_image, images[i]), \ # f"Mismatch in image data for {vox_image_dict[i]} at index {i}" # images = images[kept_indices] # In[ ]: images = torch.Tensor(images) vox = torch.Tensor(vox) assert len(images) == len(vox) # In[ ]: ### Multi-GPU config ### from accelerate import Accelerator, DeepSpeedPlugin local_rank = os.getenv('RANK') if local_rank is None: local_rank = 0 else: local_rank = int(local_rank) print("LOCAL RANK ", local_rank) data_type = torch.float32 # change depending on your mixed_precision accelerator = Accelerator(split_batches=False) batch_size = 8 # In[ ]: print("PID of this process =",os.getpid()) device = accelerator.device print("device:",device) world_size = accelerator.state.num_processes distributed = not accelerator.state.distributed_type == 'NO' num_devices = torch.cuda.device_count() global_batch_size = batch_size * num_devices print("global_batch_size", global_batch_size) if num_devices==0 or not distributed: num_devices = 1 num_workers = num_devices print(accelerator.state) # set data_type to match your mixed precision (automatically set based on deepspeed config) if accelerator.mixed_precision == "bf16": data_type = torch.bfloat16 elif accelerator.mixed_precision == "fp16": data_type = torch.float16 else: data_type = torch.float32 print("distributed =",distributed, "num_devices =", num_devices, "local rank =", local_rank, "world size =", world_size, "data_type =", data_type) print = accelerator.print # only print if local_rank=0 # ## Configurations # In[ ]: # if running this interactively, can specify jupyter_args here for argparser to use if utils.is_interactive(): model_name = 'testing_MST' # 'sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0' print("model_name:", model_name) # global_batch_size and batch_size should already be defined in the above cells # other variables can be specified in the following string: # jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name={model_name}" jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \ 
--model_name={model_name} \ --no-multi_subject --subj=1 --batch_size={batch_size} \ --hidden_dim=1024 --clip_scale=1. \ --no-blurry_recon --blur_scale=.5 \ --no-use_prior --prior_scale=30 \ --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \ --ckpt_interval=999 --no-ckpt_saving --new_test \ --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep" print(jupyter_args) jupyter_args = jupyter_args.split() # In[ ]: parser = argparse.ArgumentParser(description="Model Training Configuration") parser.add_argument( "--model_name", type=str, default="testing", help="name of model, used for ckpt saving and wandb logging (if enabled)", ) parser.add_argument( "--data_path", type=str, default="/weka/proj-fmri/shared/natural-scenes-dataset", help="Path to where NSD data is stored / where to download it to", ) parser.add_argument( "--subj",type=int, default=1, choices=[1,2,3,4,5,6,7,8], help="Validate on which subject?", ) parser.add_argument( "--multisubject_ckpt", type=str, default=None, help="Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.", ) parser.add_argument( "--num_sessions", type=int, default=0, help="Number of training sessions to include (if multi_subject, this variable doesnt matter)", ) parser.add_argument( "--use_prior",action=argparse.BooleanOptionalAction,default=False, help="whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)", ) parser.add_argument( "--batch_size", type=int, default=32, help="Batch size can be increased by 10x if only training v2c and not diffusion diffuser", ) parser.add_argument( "--wandb_log",action=argparse.BooleanOptionalAction,default=False, help="whether to log to wandb", ) parser.add_argument( "--resume_from_ckpt",action=argparse.BooleanOptionalAction,default=False, help="if not using wandb and want to resume from a ckpt", ) parser.add_argument( "--wandb_project",type=str,default="stability", help="wandb project name", ) parser.add_argument( "--mixup_pct",type=float,default=.33, help="proportion of way through training when to switch from BiMixCo to SoftCLIP", ) parser.add_argument( "--low_mem",action=argparse.BooleanOptionalAction,default=False, help="whether to preload images to cpu to speed things up but consume more memory", ) parser.add_argument( "--blurry_recon",action=argparse.BooleanOptionalAction,default=True, help="whether to output blurry reconstructions", ) parser.add_argument( "--blur_scale",type=float,default=.5, help="multiply loss from blurry recons by this number", ) parser.add_argument( "--clip_scale",type=float,default=1., help="multiply contrastive loss by this number", ) parser.add_argument( "--prior_scale",type=float,default=30, help="multiply diffusion prior loss by this", ) parser.add_argument( "--use_image_aug",action=argparse.BooleanOptionalAction,default=True, help="whether to use image augmentation", ) parser.add_argument( "--num_epochs",type=int,default=120, help="number of epochs of training", ) parser.add_argument( "--multi_subject",action=argparse.BooleanOptionalAction,default=False, ) parser.add_argument( "--new_test",action=argparse.BooleanOptionalAction,default=True, ) parser.add_argument( "--n_blocks",type=int,default=2, ) parser.add_argument( "--hidden_dim",type=int,default=1024, ) parser.add_argument( "--seq_past",type=int,default=0, ) parser.add_argument( "--seq_future",type=int,default=0, ) parser.add_argument( 
"--lr_scheduler_type",type=str,default='cycle',choices=['cycle','linear'], ) parser.add_argument( "--ckpt_saving",action=argparse.BooleanOptionalAction,default=True, ) parser.add_argument( "--ckpt_interval",type=int,default=5, help="save backup ckpt and reconstruct every x epochs", ) parser.add_argument( "--seed",type=int,default=42, ) parser.add_argument( "--max_lr",type=float,default=3e-4, ) if utils.is_interactive(): args = parser.parse_args(jupyter_args) else: args = parser.parse_args() # create global variables without the args prefix for attribute_name in vars(args).keys(): globals()[attribute_name] = getattr(args, attribute_name) outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}') if not os.path.exists(outdir) and ckpt_saving: os.makedirs(outdir,exist_ok=True) if use_image_aug or blurry_recon: import kornia import kornia.augmentation as K from kornia.augmentation.container import AugmentationSequential if use_image_aug: img_augment = AugmentationSequential( kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3), same_on_batch=False, data_keys=["input"], ) # Define the blurring augmentations blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.) if multi_subject: subj_list = np.arange(1,9) subj_list = subj_list[subj_list != subj] else: subj_list = [subj] print("subj_list", subj_list, "num_sessions", num_sessions) # ## Prep data, models, and dataloaders # In[ ]: if ckpt_saving: # save MST_ID for 2-alternative forced-choice retrieval evaluation if 'MST' in model_name: eval_dir = os.environ["eval_dir"] print('saving MST info in', eval_dir) # Saving ## if not os.path.exists(eval_dir): os.mkdir(eval_dir) np.save(f"{eval_dir}/MST_ID.npy", MST_ID) np.save(f"{eval_dir}/MST_pairmate_indices.npy", MST_pairmate_indices) if remove_random_n: np.save(f"{eval_dir}/imgs_to_remove.npy", imgs_to_remove) np.save(f"{eval_dir}/train_image_indices.npy", train_image_indices) np.save(f"{eval_dir}/test_image_indices.npy", test_image_indices) np.save(f"{eval_dir}/images.npy", images) np.save(f"{eval_dir}/vox.npy", vox) np.save(f'{eval_dir}/train_test_mean_s1.npy', train_test_mean_s1) np.save(f'{eval_dir}/train_test_std_s1.npy', train_test_std_s1) np.save(f'{eval_dir}/train_test_mean_s2.npy', train_test_mean_s2) np.save(f'{eval_dir}/train_test_std_s2.npy', train_test_std_s2) # ### Creating wds dataloader, preload betas and all 73k possible images # In[ ]: def my_split_by_node(urls): return urls num_voxels_list = [] if multi_subject: nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30]) num_samples_per_epoch = (750*40) // num_devices else: # num_samples_per_epoch = (750*num_sessions) // num_devices num_samples_per_epoch = len(train_image_indices) print("dividing batch size by subj_list, which will then be concatenated across subj during training...") batch_size = batch_size // len(subj_list) num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list)) print("batch_size =", batch_size, "num_iterations_per_epoch =",num_iterations_per_epoch, "num_samples_per_epoch =",num_samples_per_epoch) # In[ ]: train_data = {} train_dl = {} train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices)) test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices)) # In[ ]: num_voxels = {} voxels = {} for s in subj_list: print(f"Training with {num_sessions} sessions") train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], 
batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True) num_voxels_list.append(vox[0].shape[-1]) num_voxels[f'subj0{s}'] = vox[0].shape[-1] voxels[f'subj0{s}'] = vox print(f"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}") print("Loaded all subj train dls and vox!\n") # Validate only on one subject if multi_subject: subj = subj_list[0] # cant validate on the actual held out person so picking first in subj_list test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True) print(f"Loaded test dl for subj{subj}!\n") # ## Load models # ### CLIP image embeddings model # In[ ]: ## USING OpenCLIP ViT-bigG ### sys.path.append('generative_models/') import sgm from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder # from generative_models.sgm.models.diffusion import DiffusionEngine # from omegaconf import OmegaConf try: print(clip_img_embedder) except: clip_img_embedder = FrozenOpenCLIPImageEmbedder( arch="ViT-bigG-14", version="laion2b_s39b_b160k", output_tokens=True, only_tokens=True, ) clip_img_embedder.to(device) clip_seq_dim = 256 clip_emb_dim = 1664 # ## USING OPEN AI CLIP ViT-L ### # import clip # try: # print(clip_model) # except: # clip_model, preprocess = clip.load("ViT-L/14", device=device) # preprocess = transforms.Compose([ # transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR), # transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], # std=[0.26862954, 0.26130258, 0.27577711]), # ]) # def clip_img_embedder(image): # preproc_img = preprocess(image) # return clip_model.encode_image(preproc_img) # clip_seq_dim = 1 # clip_emb_dim = 768 # ### MindEye modules # In[ ]: model = utils.prepare_model_and_training( num_voxels_list=num_voxels_list, n_blocks=n_blocks, hidden_dim=hidden_dim, clip_emb_dim=clip_emb_dim, clip_seq_dim=clip_seq_dim, use_prior=use_prior, clip_scale=clip_scale ) # In[ ]: # test on subject 1 with fake data b = torch.randn((2,1,num_voxels_list[0])) print(b.shape, model.ridge(b,0).shape) # In[ ]: # test that the model works on some fake data b = torch.randn((2,1,hidden_dim)) print("b.shape",b.shape) backbone_, clip_, blur_ = model.backbone(b) print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape) # ### Adding diffusion prior + unCLIP if use_prior=True # In[ ]: if use_prior: from models import * # setup diffusion prior network out_dim = clip_emb_dim depth = 6 dim_head = 52 heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim timesteps = 100 prior_network = VersatileDiffusionPriorNetwork( dim=out_dim, depth=depth, dim_head=dim_head, heads=heads, causal=False, num_tokens = clip_seq_dim, learned_query_mode="pos_emb" ) model.diffusion_prior = BrainDiffusionPrior( net=prior_network, image_embed_dim=out_dim, condition_on_text_encodings=False, timesteps=timesteps, cond_drop_prob=0.2, image_embed_scale=None, ) utils.count_params(model.diffusion_prior) utils.count_params(model) # ### Setup optimizer / lr / ckpt saving # In[ ]: no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] opt_grouped_parameters = [ {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2}, {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2}, {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}, ] # model.backbone.requires_grad_(False) if use_prior: opt_grouped_parameters.extend([ {'params': [p for n, p in 
model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
        {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ])

optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)

if lr_scheduler_type == 'linear':
    lr_scheduler = torch.optim.lr_scheduler.LinearLR(
        optimizer,
        total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),
        last_epoch=-1
    )
elif lr_scheduler_type == 'cycle':
    if num_iterations_per_epoch==0:
        num_iterations_per_epoch=1
    total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))
    print("total_steps", total_steps)
    lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=max_lr,
        total_steps=total_steps,
        final_div_factor=1000,
        last_epoch=-1,
        pct_start=2/num_epochs
    )

def save_ckpt(tag):
    ckpt_path = outdir+f'/{tag}.pth'
    if accelerator.is_main_process:
        unwrapped_model = accelerator.unwrap_model(model)
        torch.save({
            'epoch': epoch,
            'model_state_dict': unwrapped_model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'train_losses': losses,
            'test_losses': test_losses,
            'lrs': lrs,
            }, ckpt_path)
    print(f"\n---saved {outdir}/{tag} ckpt!---\n")

def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False):
    print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
    checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')  # load the requested tag rather than hard-coding last.pth
    state_dict = checkpoint['model_state_dict']
    if multisubj_loading: # remove incompatible ridge layer that will otherwise error
        state_dict.pop('ridge.linears.0.weight',None)
    model.load_state_dict(state_dict, strict=strict)
    if load_epoch:
        globals()["epoch"] = checkpoint['epoch']
        print("Epoch",epoch)
    if load_optimizer:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    if load_lr:
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
    del checkpoint

print("\nDone with model preparations!")
num_params = utils.count_params(model)

# # Wandb

# In[ ]:

if local_rank==0 and wandb_log: # only use main process for wandb logging
    import wandb
    import time

    wandb_project = 'rtmindeye'
    print(f"wandb {wandb_project} run {model_name}")
    # Need to configure wandb beforehand in terminal with "wandb init"!
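    # (Editor's note, not part of the original script: the one-time setup is
    # typically `wandb login` with your API key in the terminal; `wandb init`
    # additionally associates the working directory with a project.)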
wandb_config = { "model_name": model_name, "global_batch_size": global_batch_size, "batch_size": batch_size, "num_epochs": num_epochs, "num_sessions": num_sessions, "num_params": num_params, "clip_scale": clip_scale, "prior_scale": prior_scale, "blur_scale": blur_scale, "use_image_aug": use_image_aug, "max_lr": max_lr, "mixup_pct": mixup_pct, "num_samples_per_epoch": num_samples_per_epoch, "ckpt_interval": ckpt_interval, "ckpt_saving": ckpt_saving, "seed": seed, # SLURM array task ID "distributed": distributed, "num_devices": num_devices, "world_size": world_size, } print("wandb_config:\n", wandb_config) print("wandb_id:", model_name) # Initialize wandb wandb.init( id=model_name, project=wandb_project, name=model_name, config=wandb_config, resume="allow", save_code=True, ) # Get SLURM job & array ID slurm_job_id = utils.get_slurm_job() slurm_array_id = seed # seed corresponds to SLURM_ARRAY_TASK_ID # Define SLURM log paths log_dir = "slurms" log_files = [ f"{log_dir}/{slurm_job_id}_{slurm_array_id}.out", f"{log_dir}/{slurm_job_id}_{slurm_array_id}.err", ] # Ensure logs exist before logging them for log_file in log_files: wait_time = 0 while not os.path.exists(log_file) and wait_time < 60: # Wait max 60s time.sleep(5) wait_time += 5 # Log SLURM logs as artifacts artifact = wandb.Artifact(f"slurm_logs_{slurm_job_id}_{slurm_array_id}", type="logs") for log_file in log_files: if os.path.exists(log_file): artifact.add_file(log_file) wandb.log_artifact(artifact) else: wandb_log = False # # Train the model # In[ ]: epoch = 0 losses, test_losses, lrs = [], [], [] best_test_loss = 1e9 torch.cuda.empty_cache() # In[ ]: # load multisubject stage1 ckpt if set if multisubject_ckpt is not None and not resume_from_ckpt: load_ckpt("last",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True) # In[ ]: # checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu') # state_dict = checkpoint['model_state_dict'] # model.load_state_dict(state_dict, strict=False) # In[ ]: # train_dls = [train_dl[f'subj0{s}'] for s in subj_list] model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler) # leaving out test_dl since we will only have local_rank 0 device do evals # In[ ]: print(f"{model_name} starting with epoch {epoch} / {num_epochs}") progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0)) test_image, test_voxel = None, None mse = nn.MSELoss() l1 = nn.L1Loss() soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs)) skip_train = True if epoch>=(num_epochs-1) else False # skip training if you are resuming from a fully trained model for epoch in progress_bar: model.train() fwd_percent_correct = 0. bwd_percent_correct = 0. test_fwd_percent_correct = 0. test_bwd_percent_correct = 0. recon_cossim = 0. test_recon_cossim = 0. recon_mse = 0. test_recon_mse = 0. loss_clip_total = 0. loss_blurry_total = 0. loss_blurry_cont_total = 0. test_loss_clip_total = 0. loss_prior_total = 0. test_loss_prior_total = 0. blurry_pixcorr = 0. test_blurry_pixcorr = 0. # you now have voxel_iters and image_iters with num_iterations_per_epoch batches each for train_i, behav in enumerate(train_dl): with torch.cuda.amp.autocast(dtype=data_type): optimizer.zero_grad() loss = 0. 
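# Note on the contrastive-loss schedule driven by mixup_pct (descriptive comment,
# added for clarity): for the first int(mixup_pct * num_epochs) epochs the voxels
# are MixCo-mixed (utils.mixco) and scored with utils.mixco_nce ("BiMixCo");
# after that the loop switches to utils.soft_clip_loss with a temperature taken
# from soft_loss_temps, which utils.cosine_anneal sweeps from 0.004 to 0.0075.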
behav = behav[0] image = images[behav.long().cpu()].to(device) voxel = vox[behav.long().cpu()] # voxel = (voxel - train_mean) / train_std voxel = torch.Tensor(voxel).unsqueeze(1).to(device) if use_image_aug: image = img_augment(image) clip_target = clip_img_embedder(image) assert not torch.any(torch.isnan(clip_target)) if epoch < int(mixup_pct * num_epochs): voxel, perm, betas, select = utils.mixco(voxel) voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)] # voxel_ridge = torch.cat(voxel_ridge_list, dim=0) backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge) if clip_scale>0: clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1) clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1) if use_prior: loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target) loss_prior_total += loss_prior.item() loss_prior *= prior_scale loss += loss_prior recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item() recon_mse += mse(prior_out, clip_target).item() if clip_scale>0: if epoch < int(mixup_pct * num_epochs): loss_clip = utils.mixco_nce( clip_voxels_norm, clip_target_norm, temp=.006, perm=perm, betas=betas, select=select) else: epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)] loss_clip = utils.soft_clip_loss( clip_voxels_norm, clip_target_norm, temp=epoch_temp) loss_clip_total += loss_clip.item() loss_clip *= clip_scale loss += loss_clip if blurry_recon: image_enc_pred, transformer_feats = blurry_image_enc_ image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215 loss_blurry = l1(image_enc_pred, image_enc) loss_blurry_total += loss_blurry.item() if epoch < int(mixup_pct * num_epochs): image_enc_shuf = image_enc[perm] betas_shape = [-1] + [1]*(len(image_enc.shape)-1) image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \ image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape) image_norm = (image - mean)/std image_aug = (blur_augs(image) - mean)/std _, cnx_embeds = cnx(image_norm) _, cnx_aug_embeds = cnx(image_aug) cont_loss = utils.soft_cont_loss( nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1), nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1), nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1), temp=0.2) loss_blurry_cont_total += cont_loss.item() loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215 if clip_scale>0: # forward and backward top 1 accuracy labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item() bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item() if blurry_recon: with torch.no_grad(): # only doing pixcorr eval on a subset of the samples per batch because its costly & slow to compute autoenc.decode() random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False) blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1) pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images) blurry_pixcorr += pixcorr.item() utils.check_loss(loss) accelerator.backward(loss) optimizer.step() losses.append(loss.item()) lrs.append(optimizer.param_groups[0]['lr']) if lr_scheduler_type is 
not None: lr_scheduler.step() if train_i >= num_iterations_per_epoch-1: break model.eval() logs = {} if local_rank == 0: with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): for i in range(2): for j in range(2): subset_indices = MST_idx[:, i, j].reshape(-1) subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices)) subset_dl = torch.utils.data.DataLoader( subset_dataset, batch_size=len(MST_idx), shuffle=False, drop_last=True, pin_memory=True ) # Reset metrics for this subset test_losses = [] test_loss_clip_total = 0 test_loss_prior_total = 0 test_blurry_pixcorr = 0 test_fwd_percent_correct = 0 test_bwd_percent_correct = 0 test_recon_cossim = 0 test_recon_mse = 0 for test_i, behav in enumerate(subset_dl): behav = behav[0] loss = 0. if behav.ndim > 1: image = images[behav[:, 0].long().cpu()].to(device) voxel = vox[behav.long().cpu()].mean(1) else: image = images[behav.long().cpu()].to(device) voxel = vox[behav.long().cpu()] voxel = torch.Tensor(voxel).unsqueeze(1).to(device) clip_img_embedder = clip_img_embedder.to(device) clip_target = clip_img_embedder(image.float()) voxel_ridge = model.ridge(voxel, 0) backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge) if clip_scale > 0: clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1) clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1) random_samps = np.random.choice(np.arange(len(image)), size=len(image) // 5, replace=False) if use_prior: loss_prior, contaminated_prior_out = model.diffusion_prior( text_embed=backbone[random_samps], image_embed=clip_target[random_samps]) test_loss_prior_total += loss_prior.item() loss_prior *= prior_scale loss += loss_prior if clip_scale > 0: loss_clip = utils.soft_clip_loss( clip_voxels_norm, clip_target_norm, temp=0.006 ) test_loss_clip_total += loss_clip.item() loss_clip *= clip_scale loss += loss_clip if blurry_recon: image_enc_pred, _ = blurry_image_enc_ blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps] / 0.18215).sample / 2 + 0.5).clamp(0, 1) pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images) test_blurry_pixcorr += pixcorr.item() if clip_scale > 0: labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item() test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item() utils.check_loss(loss) test_losses.append(loss.item()) logs.update({ f"subset_{i}_{j}_test/loss": np.mean(test_losses), f"subset_{i}_{j}_test/loss_clip_total": test_loss_clip_total / (test_i + 1), f"subset_{i}_{j}_test/loss_prior": test_loss_prior_total / (test_i + 1), f"subset_{i}_{j}_test/blurry_pixcorr": test_blurry_pixcorr / (test_i + 1), f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1), f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1), }) print(f"--- Subset ({i},{j}) ---") for k, v in logs.items(): if f"subset_{i}_{j}" in k: print(f"{k}: {v:.4f}") # After subset loop: add train (and global test, if you want) metrics logs.update({ "train/loss": np.mean(losses[-(train_i+1):]), "train/lr": lrs[-1], "train/num_steps": len(losses), "train/fwd_pct_correct": fwd_percent_correct / (train_i + 1), "train/bwd_pct_correct": bwd_percent_correct / (train_i + 1), "train/loss_clip_total": loss_clip_total / (train_i + 1), "train/loss_blurry_total": loss_blurry_total / 
(train_i + 1), "train/loss_blurry_cont_total": loss_blurry_cont_total / (train_i + 1), "train/blurry_pixcorr": blurry_pixcorr / (train_i + 1), "train/recon_cossim": recon_cossim / (train_i + 1), "train/recon_mse": recon_mse / (train_i + 1), "train/loss_prior": loss_prior_total / (train_i + 1), }) # if finished training, save jpg recons if they exist if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0): if blurry_recon: image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215 # transform blurry recon latents to images and plot it fig, axes = plt.subplots(1, 8, figsize=(10, 4)) jj=-1 for j in [0,1,2,3]: jj+=1 axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1))) axes[jj].axis('off') jj+=1 axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1))) axes[jj].axis('off') plt.show() progress_bar.set_postfix(**logs) if wandb_log: wandb.log(logs) # Save model checkpoint and reconstruct if (ckpt_saving) and (epoch % ckpt_interval == 0): save_ckpt(f'last') # wait for other GPUs to catch up if needed accelerator.wait_for_everyone() torch.cuda.empty_cache() print("\n===Finished!===\n") if ckpt_saving: save_ckpt(f'last') # In[ ]: len(test_data) # In[ ]: # # Track metrics here: # https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999 # **To tell if the model is working I'm looking at test_bwd/fwd_pct_correct and seeing if that is doing better than chance (1/batch_size)** # In[ ]: # MST_pairmate_names # In[ ]: x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')] assert len(image_idx) == len(x) pairs = [] for i, p in enumerate(MST_pairmate_names): assert p[0] != p[1] # no duplicate images pairs.append([utils.find_all_indices(x,p[0]), utils.find_all_indices(x,p[1])]) pairs = np.array(pairs) # In[ ]: pairs.shape # In[ ]: model.eval() logs = {} if local_rank == 0: with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): for i in range(2): for j in range(2): subset_indices = MST_idx[:, i, j].reshape(-1) subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices)) subset_dl = torch.utils.data.DataLoader( subset_dataset, batch_size=len(MST_idx), shuffle=False, drop_last=True, pin_memory=True ) # Reset metrics for this subset test_fwd_percent_correct = 0 test_bwd_percent_correct = 0 for test_i, behav in enumerate(subset_dl): behav = behav[0] loss = 0. 
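# Descriptive note (added): test_fwd_percent_correct below is top-1 retrieval
# accuracy of the correct CLIP image embedding given a voxel embedding, and
# test_bwd_percent_correct the reverse, both scored against the other members of
# this MST subset batch, so chance is roughly 1/batch_size (cf. the tracking
# note above).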
image = images[behav.long().cpu()].to(device) voxel = vox[behav.long().cpu()] voxel = torch.Tensor(voxel).unsqueeze(1).to(device) clip_img_embedder = clip_img_embedder.to(device) clip_target = clip_img_embedder(image.float()) voxel_ridge = model.ridge(voxel, 0) backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge) clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1) clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1) if clip_scale > 0: labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item() test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item() print(test_fwd_percent_correct) print(test_bwd_percent_correct) logs.update({ f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1), f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1), }) print("--- Full Dataset Evaluation ---") for k, v in logs.items(): print(f"{k}: {v:.4f}") # In[ ]: # if sub=="sub-002": # unique_images_pairs = [ # (2,3),(4,5),(7,8),(15,16), # (483, 484), (485, 486), (487, 488), (491, 492), (495, 496), (499, 500), (501, 502), # (503, 504), (512, 513), # ] # elif sub != 'sub-001' and session != 'ses-05': # unique_images_pairs = [ # (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16), # (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30), # (31,32),(33,34),(35,36), # (787, 788), (789, 790), (791, 792), (793, 794), (795, 796), # (797, 798), (799, 800), (801, 802), (803, 804), (805, 806), # (807, 808), (809, 810), (811, 812), (813, 814), (815, 816), # (817, 818), (819, 820), (821, 822), (823, 824), (825, 826), # (827, 828), (829, 830), (831, 832), (833, 834), (835, 836), # (837, 838), (839, 840), (841, 842), (843, 844), (845, 846), # (847, 848), (849, 850) # ] # else: # # unique_images = unique_images[unique_images!='blank.jpg'][:50] # unique_images_pairs = find_mst_pairs(x) # # unique_images[unique_images_pairs] # In[ ]: import pdb # In[ ]: def evaluate_mst_pairs(mst_pairs): with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): failed_A = [] failed_B = [] failed_non_corr = [] # Get all unique image indices all_indices = np.unique(mst_pairs.flatten()) # Pre-load all images and betas to device all_images = images[image_idx[all_indices]].to(device) all_voxels = torch.Tensor(vox[image_idx[all_indices]]).unsqueeze(1).to(device) # Get CLIP embeddings for all images all_clip_targets = clip_img_embedder(all_images.float()) all_clip_targets_norm = nn.functional.normalize(all_clip_targets.flatten(1), dim=-1) # Pass all betas through model to get MindEye embeddings all_voxel_ridge = model.ridge(all_voxels, 0) _, all_clip_voxels, _ = model.backbone(all_voxel_ridge) all_clip_voxels_norm = nn.functional.normalize(all_clip_voxels.flatten(1), dim=-1) # Dict mapping idx (which indexes the "vox" and "images" tensors) to pos (their position in the flattened array "all_indices") idx_to_pos = {idx: pos for pos, idx in enumerate(all_indices)} # Initialize scores corr_score = 0 non_corr_score = 0 corr_total = len(mst_pairs) * 2 non_corr_total = len(mst_pairs) * (len(mst_pairs)-1) * 4 # number of elements in the matrix excluding the diagonal is n*(n-1)*4 since we're doing this twice each for pairmate A and B # Pre-load voxelwise beta-based embeddings from MindEye and CLIP image embeddings idxA = np.array([pair[0] for 
pair in mst_pairs]) idxB = np.array([pair[1] for pair in mst_pairs]) posA = np.array([idx_to_pos[idx] for idx in idxA]) posB = np.array([idx_to_pos[idx] for idx in idxB]) voxA_embeddings = all_clip_voxels_norm[posA] voxB_embeddings = all_clip_voxels_norm[posB] imgA_embeddings = all_clip_targets_norm[posA] imgB_embeddings = all_clip_targets_norm[posB] simA_A = utils.batchwise_cosine_similarity(voxA_embeddings, imgA_embeddings) simA_B = utils.batchwise_cosine_similarity(voxA_embeddings, imgB_embeddings) simB_B = utils.batchwise_cosine_similarity(voxB_embeddings, imgB_embeddings) simB_A = utils.batchwise_cosine_similarity(voxB_embeddings, imgA_embeddings) # corresponding 2-AFC # is the voxel embedding for image 1 pairmate A more similar to the CLIP embedding for image 1 pairmate A or the CLIP embedding for image 1 pairmate B? correct_A = torch.diag(simA_A) > torch.diag(simA_B) # is the voxel embedding for image 1 pairmate B more similar to the CLIP embedding for image 1 pairmate B or the CLIP embedding for image 1 pairmate A? correct_B = torch.diag(simB_B) > torch.diag(simB_A) corr_score += correct_A.sum().item() corr_score += correct_B.sum().item() # Store indices where AFC fails failed_A = [i for i, correct in enumerate(correct_A.cpu()) if not correct] failed_B = [i for i, correct in enumerate(correct_B.cpu()) if not correct] # non-corresponding 2-AFC N = len(mst_pairs) # Create a mask that is True for all off-diagonal elements row_idx = torch.arange(N).unsqueeze(1) # (N, 1) col_idx = torch.arange(N).unsqueeze(0) # (1, N) off_diag_mask = row_idx != col_idx # shape (N, N) diagA_A = simA_A.diag().unsqueeze(1).expand(-1, N) # Get diagonal values and expand to (N, N) by duplicating the diagonal element along the rows (since each row is the cosine similarity between a single voxel embedding and all CLIP embeddings) diagB_B = simB_B.diag().unsqueeze(1).expand(-1, N) # pdb.set_trace() # Compare each element in the row to the diagonal element off_diag_mask_device = off_diag_mask.to(device) fail_AA = (simA_A < diagA_A) & off_diag_mask_device fail_AB = (simA_B < diagA_A) & off_diag_mask_device fail_BB = (simB_B < diagB_B) & off_diag_mask_device fail_BA = (simB_A < diagB_B) & off_diag_mask_device non_corr_score += fail_AA.sum().item() non_corr_score += fail_AB.sum().item() non_corr_score += fail_BB.sum().item() non_corr_score += fail_BA.sum().item() # Log failed indices fail_sources = [fail_AA, fail_AB, fail_BB, fail_BA] for fail_matrix, label in zip(fail_sources, ["AA", "AB", "BB", "BA"]): fail_coords = torch.nonzero(fail_matrix, as_tuple=False).cpu().numpy() for i, j in fail_coords: failed_non_corr.append({"type": label, "i": i, "j": j, "pair_i": mst_pairs[i], "pair_j": mst_pairs[j]}) return corr_score, corr_total, int(non_corr_score), non_corr_total, failed_A, failed_B, failed_non_corr # In[ ]: all_scores = [] all_failures = [] for i in range(4): for j in range(4): mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) # shape (31, 2) corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs) # Store scores and failure info together all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total)) all_failures.append({ "repeat_A": i, "repeat_B": j, "failed_A": failed_A, "failed_B": failed_B, "failed_non_corr": failed_non_corr, "mst_pairs": mst_pairs, }) # Print summary print(f"pairmate A repeat {i} vs pairmate B repeat {j}:") print(f"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})") 
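# Added note on the denominators printed here: corr_total = 2 * len(mst_pairs)
# (one corresponding comparison per pairmate per pair), while
# non_corr_total = 4 * len(mst_pairs) * (len(mst_pairs) - 1), i.e. every
# off-diagonal entry of the four similarity matrices (AA, AB, BB, BA).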
# In[ ]:


all_scores = []
all_failures = []

for i in range(4):
    for j in range(4):
        mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1)  # shape (31, 2)
        corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs)

        # Store scores and failure info together
        all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total))
        all_failures.append({
            "repeat_A": i,
            "repeat_B": j,
            "failed_A": failed_A,
            "failed_B": failed_B,
            "failed_non_corr": failed_non_corr,
            "mst_pairs": mst_pairs,
        })

        # Print summary
        print(f"pairmate A repeat {i} vs pairmate B repeat {j}:")
        print(f"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})")
        print(f"2-AFC non-corresponding = {non_corr_score}/{non_corr_total} ({non_corr_score/non_corr_total:.2%})")
        print("")


# In[ ]:


all_scores = np.array(all_scores)

print(f"average 2-AFC corresponding: {all_scores[:,0].mean():.2f}/{all_scores[:,1].mean():.2f} ({(all_scores[:,0].sum()/all_scores[:,1].sum()):.2%})")
print(f"average 2-AFC non-corresponding: {all_scores[:,2].mean():.2f}/{all_scores[:,3].mean():.2f} ({(all_scores[:,2].sum()/all_scores[:,3].sum()):.2%})")
print(f'chance = 1/{corr_total} ({(1/corr_total):.2%})')


# In[ ]:


from collections import defaultdict

# Map from image index to failure details
failed_images = defaultdict(list)

for failure_entry in all_failures:
    mst_pairs = failure_entry["mst_pairs"]
    i, j = failure_entry["repeat_A"], failure_entry["repeat_B"]

    # A-side failures
    for fail_idx in failure_entry["failed_A"]:
        image_idx = mst_pairs[fail_idx][0]  # note: reuses the name image_idx, shadowing the global index array used elsewhere
        pairmate_idx = mst_pairs[fail_idx][1]
        failed_images[image_idx].append({
            "repeat_A": i,
            "repeat_B": j,
            "pairmate": pairmate_idx,
            "type": "A",
        })

    # B-side failures
    for fail_idx in failure_entry["failed_B"]:
        image_idx = mst_pairs[fail_idx][1]
        pairmate_idx = mst_pairs[fail_idx][0]
        failed_images[image_idx].append({
            "repeat_A": i,
            "repeat_B": j,
            "pairmate": pairmate_idx,
            "type": "B",
        })
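
# In[ ]:


# Minimal summary sketch (assumes the failed_images mapping built above): tally how many
# corresponding 2-AFC comparisons each image index failed across the 4x4 repeat pairings.
failure_counts = {img: len(entries) for img, entries in failed_images.items()}
for img, count in sorted(failure_counts.items(), key=lambda kv: -kv[1]):
    print(f"image index {img}: {count} failed corresponding 2-AFC comparisons")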
# In[ ]:


# import matplotlib.pyplot as plt

# for img_idx, failure_list in failed_images.items():
#     print(f"\n==== Failed Image {img_idx} ====")

#     # Load and normalize the embeddings
#     image = images[img_idx].unsqueeze(0).to(device).float()
#     image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)

#     # Get voxel→CLIP embedding
#     voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)
#     voxel_embed = model.backbone(model.ridge(voxel, 0))[1]
#     voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)

#     # Display original image
#     print("Original image:")
#     display(utils.torch_to_Image(images[img_idx]))

#     # Collect unique pairmates involved in the failure
#     pairmate_indices = list(set(entry["pairmate"] for entry in failure_list))

#     # Plot failed pairmates with similarity annotations
#     fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
#     if len(pairmate_indices) == 1:
#         axs = [axs]

#     # Compute "correct" similarity — voxel to its own CLIP embedding
#     correct_clip = image_clip.float()
#     correct_voxel_sim = (voxel_embed.float() @ correct_clip.T).item()
#     print(f"Correct voxel→CLIP similarity = {correct_voxel_sim:.4f}")

#     # Plot failed pairmates with similarity annotations
#     fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
#     if len(pairmate_indices) == 1:
#         axs = [axs]

#     for ax, mate_idx in zip(axs, pairmate_indices):
#         mate_image = images[mate_idx].unsqueeze(0).to(device).float()
#         mate_clip = nn.functional.normalize(clip_img_embedder(mate_image).flatten(1), dim=-1).float()

#         # Similarities
#         clip_sim = (correct_clip @ mate_clip.T).item()
#         voxel_sim = (voxel_embed.float() @ mate_clip.T).item()

#         # Check if this was the mistaken "higher" match
#         wrong_match = voxel_sim > correct_voxel_sim

#         # Plot image and annotate
#         ax.imshow(utils.torch_to_Image(images[mate_idx]))
#         ax.axis("off")
#         ax.set_title(f"Pairmate {mate_idx}\nCLIP={clip_sim:.3f}\nVoxel={voxel_sim:.3f}\n{'← WRONG' if wrong_match else ''}",
#                      color="red" if wrong_match else "black")

#     plt.tight_layout()
#     plt.show()


# In[ ]:


# comp[20,18] is the only False


# In[ ]:


# import matplotlib.pyplot as plt

# for img_idx, failure_list in failed_images.items():
#     print(f"\n==== Failed Image {img_idx} ====")

#     # Load and normalize the embeddings
#     image = images[img_idx].unsqueeze(0).to(device).float()
#     image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)

#     # Get voxel→CLIP embedding
#     voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)
#     voxel_embed = model.backbone(model.ridge(voxel, 0))[1]
#     voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)

#     # Display original image
#     print("Original image:")
#     display(utils.torch_to_Image(images[img_idx]))

#     # Collect unique pairmates involved in the failure
#     pairmate_indices = list(set(entry["pairmate"] for entry in failure_list))

#     # Plot failed pairmates with similarity annotations
#     fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))
#     if len(pairmate_indices) == 1:
#         axs = [axs]

#     for ax, mate_idx in zip(axs, pairmate_indices):
#         # Get all CLIP embeddings for failed image and pairmates
#         all_indices = [img_idx] + pairmate_indices
#         all_images = images[all_indices].to(device).float()
#         all_clip_embeds = clip_img_embedder(all_images)
#         all_clip_embeds = nn.functional.normalize(all_clip_embeds.flatten(1), dim=-1).float()

#         # Compare voxel embedding for the failed image to all CLIP embeddings
#         sims = (voxel_embed.float() @ all_clip_embeds.T).squeeze().cpu().detach().numpy()  # shape: (1, N) → (N,)

#         image_ids = ["correct"] + [f"pairmate {idx}" for idx in pairmate_indices]

#         # Sort and display
#         sorted_sims = sorted(zip(image_ids, all_indices, sims), key=lambda x: -x[2])
#         print("\n🧠 Voxel→CLIP similarity ranking:")
#         for label, idx, sim in sorted_sims:
#             print(f"{label:12} (index {idx:3}): similarity = {sim:.4f}")

#         # Optional assertion: did any pairmate score higher than the correct image?
#         correct_sim = sims[0]
#         higher = [(label, sim) for label, _, sim in sorted_sims[1:] if sim > correct_sim]
#         if higher:
#             print("\n❌ Mismatch detected: voxel embedding matched other images more than the correct one!")
#         else:
#             print("\n✅ Model correctly ranked the correct image highest (despite failure elsewhere)")

#     plt.tight_layout()
#     plt.show()


# In[ ]:


mst_pairs[:5]


# In[ ]:


pairs[0]


# In[ ]:


# images[image_idx[pairs[0][0]]].shape


# In[ ]:


ix = 0
display(utils.torch_to_Image(images[pairs[ix][0]]))
display(utils.torch_to_Image(images[pairs[ix][1]]))


# In[ ]:


# print(np.allclose(embed_A[0], embed_A[1]))  # across repeats
# print(np.allclose(embed_A[0], embed_B[0]))  # across pairmates


# In[ ]:


# def generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=2):
#     n_imgs, n_pairmates, n_repeats = pairs.shape
#     nonmatch_pairs = []

#     for i in range(n_imgs):
#         other_idxs = [j for j in range(n_imgs) if j != i]
#         sampled_j = np.random.choice(other_idxs, size=num_images_per_source, replace=False)
#         for j in sampled_j:
#             for _ in range(num_repeats):
#                 a_side = np.random.randint(2)
#                 b_side = np.random.randint(2)
#                 a_repeat = np.random.randint(n_repeats)
#                 b_repeat = np.random.randint(n_repeats)
#                 pair_a = pairs[i, a_side, a_repeat]
#                 pair_b = pairs[j, b_side, b_repeat]
#                 nonmatch_pairs.append([pair_a, pair_b])

#     return np.array(nonmatch_pairs)


# In[ ]:


# nonmatch_pairs = generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=1)
# results = evaluate_mst_pairs(nonmatch_pairs)
# print(results)


# In[ ]:


# # Compare first few pairs
# for pair in pairs:  # Checking first 2 pairs
#     print("Indices in mst_pairs:", pair)
#     print("Corresponding filenames:")
#     print(f"Image 1: {x[pair[0]]}")
#     print(f"Image 2: {x[pair[1]]}\n")


# In[ ]:


# for i in range(len(pairs)):
#     fig, ax = plt.subplots(1, 2, figsize=(10, 8))

#     ax[0].imshow(images[pairs[i][0]].permute(1, 2, 0).numpy())
#     ax[0].set_title(f"Repeat 1")

#     ax[1].imshow(images[pairs[i][1]].permute(1, 2, 0).numpy())
#     ax[1].set_title(f"Repeat 2")

#     plt.setp(ax, xticks=[], yticks=[])
#     plt.tight_layout()
#     plt.show()
# In[ ]:


# score = 0
# total = 0

# with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):
#     for pair in unique_images_pairs:
#         imageA_idx, imageB_idx = pair
#         imageA_idx = np.where(image_idx == imageA_idx)[0].item()
#         imageB_idx = np.where(image_idx == imageB_idx)[0].item()

#         voxel = vox[imageA_idx].to(device)[None]
#         voxel = torch.Tensor(voxel).unsqueeze(1).to(device)

#         imageA = images[imageA_idx].to(device)[None]
#         imageB = images[imageB_idx].to(device)[None]

#         clip_targetA = clip_img_embedder(imageA.float())
#         clip_targetB = clip_img_embedder(imageB.float())

#         voxel_ridge = model.ridge(voxel,0)
#         backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)

#         clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
#         clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
#         clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)

#         cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
#         cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)

#         if cossimA > cossimB:
#             score += 1
#         total += 1

#     for pair in unique_images_pairs:
#         imageA_idx, imageB_idx = pair
#         imageA_idx = np.where(image_idx == imageA_idx)[0].item()
#         imageB_idx = np.where(image_idx == imageB_idx)[0].item()

#         voxel = vox[imageB_idx].to(device)[None]
#         voxel = torch.Tensor(voxel).unsqueeze(1).to(device)

#         imageA = images[imageA_idx].to(device)[None]
#         imageB = images[imageB_idx].to(device)[None]

#         clip_targetA = clip_img_embedder(imageA.float())
#         clip_targetB = clip_img_embedder(imageB.float())

#         voxel_ridge = model.ridge(voxel,0)
#         backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)

#         clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)
#         clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)
#         clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)

#         cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)
#         cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)

#         if cossimB > cossimA:
#             score += 1
#         total += 1

# print(score/total)


# In[ ]:


#display(utils.torch_to_Image(imageA))
#display(utils.torch_to_Image(imageB))


# In[ ]:


# from scipy.stats import binomtest

# total_samples = len(np.array(unique_images_pairs).flatten())
# assert total_samples == 100

# correct_predictions = int((score/total) * total_samples)  # calculate the number of correct predictions
# expected_accuracy = 0.5  # expected accuracy under the null hypothesis

# # Perform the binomial test
# binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')
# p_value = binom_stats.pvalue

# # Output the result
# print(f"P-value: {p_value}")
# if p_value < 0.05:
#     print("The decoder's accuracy is significantly better than chance.")
# else:
#     print("The decoder's accuracy is not significantly better than chance.")


# In[ ]:
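
# Hedged follow-up sketch (not part of the original analysis): apply the binomial test from
# the commented cell above to the corresponding 2-AFC totals accumulated in all_scores.
# Treating trials across the 4x4 repeat pairings as independent is an assumption here,
# not a claim about the correct statistical model for repeated measures.
from scipy.stats import binomtest

n_correct = int(all_scores[:, 0].sum())  # total correct corresponding 2-AFC choices
n_trials = int(all_scores[:, 1].sum())   # total corresponding 2-AFC trials
binom_result = binomtest(n_correct, n_trials, p=0.5, alternative='greater')
print(f"corresponding 2-AFC: {n_correct}/{n_trials}, p = {binom_result.pvalue:.4g} vs. 50% chance")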