diff --git "a/main-finetune.ipynb" "b/main-finetune.ipynb" new file mode 100644--- /dev/null +++ "b/main-finetune.ipynb" @@ -0,0 +1,2814 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b0f0f4f3", + "metadata": {}, + "source": [ + "# Import packages & functions" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "5bad764b-45c1-45ce-a716-8d055e09821a", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "importing modules\n", + "SLURM seed not found, using default: 0\n" + ] + } + ], + "source": [ + "print(\"importing modules\")\n", + "import os\n", + "import sys\n", + "import json\n", + "import argparse\n", + "import numpy as np\n", + "import time\n", + "import random\n", + "import string\n", + "import h5py\n", + "from tqdm import tqdm\n", + "import webdataset as wds\n", + "from PIL import Image\n", + "import pandas as pd\n", + "import nibabel as nib\n", + "import nilearn\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "import torch.nn as nn\n", + "from torchvision import transforms\n", + "\n", + "# tf32 data type is faster than standard float32\n", + "torch.backends.cuda.matmul.allow_tf32 = True\n", + "\n", + "import utils\n", + "from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas\n", + "\n", + "# imports utils from mindeye_preproc as \"preproc\"\n", + "import importlib.util\n", + "parent_utils_path = \"/home/ri4541/mindeye_preproc/analysis/utils.py\"\n", + "spec = importlib.util.spec_from_file_location(\"utils\", parent_utils_path)\n", + "preproc = importlib.util.module_from_spec(spec)\n", + "parent_dir = os.path.dirname(parent_utils_path)\n", + "if parent_dir not in sys.path:\n", + " sys.path.append(parent_dir)\n", + "spec.loader.exec_module(preproc)\n", + "\n", + "if utils.is_interactive():\n", + " from IPython.display import clear_output # function to clear print outputs in cell\n", + " %load_ext autoreload \n", + " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n", + " %autoreload 2 \n", + " \n", + "seed = utils.get_slurm_seed()" + ] + }, + { + "cell_type": "markdown", + "id": "bae2b2ad-e1ef-4262-8263-6ae9a0766caa", + "metadata": {}, + "source": [ + "# Princeton data prep" + ] + }, + { + "cell_type": "markdown", + "id": "c6dbeabe-9e9c-4d8d-a8c3-414d79d14e63", + "metadata": {}, + "source": [ + "## Load Data & Design" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "0f2d14fc-bfe3-40dc-b14e-070812c43406", + "metadata": {}, + "outputs": [], + "source": [ + "if utils.is_interactive():\n", + " sub = \"sub-005\"\n", + " session = \"all\"\n", + " task = 'C' # 'study' or 'A'; used to search for functional run in bids format\n", + " func_task_name = 'C'\n", + "else:\n", + " sub = os.environ[\"sub\"]\n", + " session = os.environ[\"session\"]\n", + " task = os.environ[\"task\"]\n", + " func_task_name = 'C'\n", + "\n", + "if session == \"all\":\n", + " ses_list = [\"ses-01\", \"ses-02\", \"ses-03\"] # list of actual session IDs\n", + " design_ses_list = [\"ses-01\", \"ses-02\", \"ses-03\"] # list of session IDs to search for design matrix\n", + "else:\n", + " ses_list = [session]\n", + " design_ses_list = [session]\n", + " \n", + "task_name = f\"_task-{task}\" if task != 'study' else ''\n", + "resample_voxel_size = False\n", + "resample_post_glmsingle = False # do you want to do voxel resampling here? 
if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal\n", + "load_from_resampled_file = False # do you want to load resampled data from file? if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data\n", + " \n", + "train_test_split = 'MST' # 'MST', 'orig', 'unique'\n", + "remove_close_to_MST = False\n", + "remove_random_n = False\n", + "\n", + "if remove_close_to_MST or remove_random_n:\n", + " assert remove_close_to_MST != remove_random_n # don't remove both sets of images\n", + "\n", + "n_to_remove = 0\n", + "if remove_random_n:\n", + " assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set\n", + " n_to_remove = 150\n", + " \n", + "if resample_voxel_size:\n", + " # voxel size was unchanged in glmsingle, want to perform resampling here\n", + " resampled_vox_size = 2.5\n", + " resample_method = \"sinc\" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html\n", + " \n", + " # file name helper variables\n", + " vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore\n", + " resampled_suffix = f\"resampled_{vox_dim_str}mm_{resample_method}\"\n", + " mask_resampled_suffix = resampled_suffix\n", + " if resample_post_glmsingle:\n", + " resampled_suffix += '_postglmsingle'\n", + " else:\n", + " resampled_suffix += '_preglmsingle'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "67d4c658-b30e-4e82-b0d1-0731cc619751", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "session label: ses-01-03\n" + ] + } + ], + "source": [ + "session_label = preproc.get_session_label(ses_list)\n", + "print('session label:', session_label)\n", + "n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "d5d98f76-1ae9-4880-93ec-cdc65e2b09a8", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-01-03_task-C\n", + "glmsingle path exists!\n" + ] + } + ], + "source": [ + "if utils.is_interactive():\n", + " glmsingle_path = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}\"\n", + "else:\n", + " glmsingle_path = os.environ[\"glmsingle_path\"]\n", + " \n", + "designdir = \"/home/ri4541/real_time_mindEye2\"\n", + "print(glmsingle_path)\n", + "\n", + "if resample_voxel_size:\n", + " # option 1: we are using original (non-resampled) GLMsingle outputs and doing the resampling here\n", + " # option 2: doing resampling pre-GLMsingle and using those outputs; no resampling involved here\n", + " if resample_post_glmsingle:\n", + " # option 1\n", + " orig_glmsingle_path = glmsingle_path\n", + " glmsingle_path += f\"_{resampled_suffix}\"\n", + " print(\"resampled glmsingle path:\", glmsingle_path)\n", + " if load_from_resampled_file:\n", + " # resampling is already done; load from file\n", + " assert os.path.exists(glmsingle_path) # the new directory must have been created if we reached here\n", + " else:\n", + " # don't load from file; do resampling here\n", + " os.makedirs(glmsingle_path,exist_ok=True)\n", + " else:\n", + " # 
option 2\n", + " glmsingle_path += f\"_{resampled_suffix}\"\n", + " print(\"glmsingle path:\", glmsingle_path)\n", + "\n", + "assert os.path.exists(glmsingle_path)\n", + "print(\"glmsingle path exists!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "34c1e0c6-0641-4239-8201-f2c676532302", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loading: /home/ri4541/real_time_mindEye2/csv/sub-005_ses-01.csv\n", + "Loading: /home/ri4541/real_time_mindEye2/csv/sub-005_ses-02.csv\n", + "Loading: /home/ri4541/real_time_mindEye2/csv/sub-005_ses-03.csv\n", + "Using design file: /home/ri4541/real_time_mindEye2/csv/sub-005_ses-03.csv\n", + "Total number of images: 2310\n", + "Number of unique images: 1470\n", + "n_runs 33\n", + "['all_stimuli/special515/special_15939.jpg'\n", + " 'all_stimuli/special515/special_23241.jpg'\n", + " 'all_stimuli/special515/special_32232.jpg'\n", + " 'all_stimuli/special515/special_34238.jpg']\n", + "[190.2773371 194.2907918 198.3011098 202.3095724]\n", + "[0. 0. 0. 0.]\n", + "(2079,)\n" + ] + } + ], + "source": [ + "data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files(\n", + " sub=sub,\n", + " session=session,\n", + " func_task_name=task,\n", + " designdir=designdir,\n", + " design_ses_list=design_ses_list\n", + ")\n", + "\n", + "if sub == 'sub-001':\n", + " if session == 'ses-01':\n", + " assert image_names[0] == 'images/image_686_seed_1.png'\n", + " elif session in ('ses-02', 'all'):\n", + " assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'\n", + " elif session == 'ses-03':\n", + " assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'\n", + " elif session == 'ses-04':\n", + " assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'\n", + "elif sub == 'sub-003':\n", + " assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'\n", + "\n", + "unique_images = np.unique(image_names.astype(str))\n", + "unique_images = unique_images[(unique_images!=\"nan\")]\n", + "len_unique_images = len(unique_images)\n", + "print(\"n_runs\",n_runs)\n", + "\n", + "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n", + " assert len(unique_images) == 851\n", + "\n", + "print(image_names[:4])\n", + "print(starts[:4])\n", + "print(is_new_run[:4])\n", + "\n", + "if remove_random_n:\n", + " # want to remove 150 imgs\n", + " # 100 special515 imgs are repeated 3x (300 total)\n", + " # all other train imgs are only shown once (558 total)\n", + " # of the 150, want to sample proportionally since we're cutting all repeats for special515\n", + " # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total\n", + " np.random.seed(seed)\n", + " options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)\n", + " options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)\n", + " imgs_to_remove = np.random.choice(options_to_remove, size=99, 
replace=False)\n", + " imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))\n", + "\n", + "image_idx = np.array([]) # contains the unique index of each presented image\n", + "vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx\n", + "all_MST_images = dict()\n", + "for i, im in enumerate(image_names):\n", + " # skip if blank, nan\n", + " if im == \"blank.jpg\":\n", + " i+=1\n", + " continue\n", + " if str(im) == \"nan\":\n", + " i+=1\n", + " continue\n", + " vox_image_names = np.append(vox_image_names, im)\n", + " if remove_close_to_MST: # optionally skip close_to_MST images \n", + " if \"closest_pairs\" in im:\n", + " i+=1\n", + " continue\n", + " elif remove_random_n:\n", + " if im in imgs_to_remove:\n", + " i+=1\n", + " continue\n", + " \n", + " image_idx_ = np.where(im==unique_images)[0].item()\n", + " image_idx = np.append(image_idx, image_idx_)\n", + " \n", + " if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles\n", + " import re\n", + " if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\\d{1,2}_\\d{1,3}\\.png$', im) or re.match(r'images/\\d{1,2}_\\d{1,3}\\.png$', im)): \n", + " # the regexp here looks for **_***.png, allows 1-2 chars before underscore and 1-3 chars after it\n", + " # print(im)\n", + " all_MST_images[i] = im\n", + " i+=1 \n", + " elif 'MST' in im:\n", + " all_MST_images[i] = im\n", + " i+=1\n", + " \n", + "image_idx = torch.Tensor(image_idx).long()\n", + "# for im in new_image_names[MST_images]:\n", + "# assert 'MST_pairs' in im\n", + "# assert len(all_MST_images) == 300\n", + "\n", + "unique_MST_images = np.unique(list(all_MST_images.values())) \n", + "\n", + "MST_ID = np.array([], dtype=int)\n", + "if remove_close_to_MST:\n", + " close_to_MST_idx = np.array([], dtype=int)\n", + "if remove_random_n:\n", + " random_n_idx = np.array([], dtype=int)\n", + "\n", + "vox_idx = np.array([], dtype=int)\n", + "j=0 # this is a counter keeping track of the remove_random_n used later to index vox based on the removed images; unused otherwise\n", + "for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure\n", + " # skip if blank, nan\n", + " if im == \"blank.jpg\":\n", + " i+=1\n", + " continue\n", + " if str(im) == \"nan\":\n", + " i+=1\n", + " continue\n", + " if remove_close_to_MST: # optionally skip close_to_MST images \n", + " if \"closest_pairs\" in im:\n", + " close_to_MST_idx = np.append(close_to_MST_idx, i)\n", + " i+=1\n", + " continue\n", + " if remove_random_n:\n", + " if im in imgs_to_remove:\n", + " vox_idx = np.append(vox_idx, j)\n", + " i+=1\n", + " j+=1\n", + " continue\n", + " j+=1\n", + " curr = np.where(im == unique_MST_images)\n", + " # print(curr)\n", + " if curr[0].size == 0:\n", + " MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later\n", + " else:\n", + " MST_ID = np.append(MST_ID, curr)\n", + " \n", + "assert len(MST_ID) == len(image_idx)\n", + "# assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)\n", + "# MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for 
torch.FloatTensor\n", + "print(MST_ID.shape)\n", + "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n", + " assert len(all_MST_images) == 100" + ] + }, + { + "cell_type": "markdown", + "id": "e48ffe08-71ec-4a3f-9371-66fed2c21de4", + "metadata": { + "tags": [] + }, + "source": [ + "## Load images" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ceb404f-b04f-42b6-afc4-283bb2b40c08", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/2079 [00:00 1:\n", + " session_length = 693\n", + " for image, session_indices_list in image_to_indices.items():\n", + " new_indices_list = []\n", + " for idx, indices in enumerate(session_indices_list):\n", + " offset = idx * session_length\n", + " new_indices = [i + offset for i in indices]\n", + " new_indices_list.append(new_indices)\n", + " image_to_indices[image] = new_indices_list\n", + " \n", + " import itertools\n", + " assert max(itertools.chain.from_iterable(list(image_to_indices.values())))[0] == (len(ses_list)*session_length) - 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "df24c635-8019-4fc9-b6c9-54c86d6eeb58", + "metadata": {}, + "outputs": [], + "source": [ + "if resample_voxel_size:\n", + " from nilearn.masking import apply_mask, unmask\n", + " ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz'\n", + " omat_name = f'{glmsingle_path}/boldref_omat'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e846060a-d9c0-43d3-824b-08fa2b4b354e", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from nilearn.plotting import plot_roi\n", + "\n", + "print('loading brain mask')\n", + "avg_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_brain.nii.gz')\n", + "final_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_mask.nii.gz')\n", + "\n", + "# mask info\n", + "dimsize=avg_mask.header.get_zooms()\n", + "affine_mat = avg_mask.affine\n", + "brain=avg_mask.get_fdata()\n", + "xyz=brain.shape #xyz dimensionality of brain mask and epi data\n", + "\n", + "print('Mask dimensions:', dimsize)\n", + "print('')\n", + "print('Affine:')\n", + "print(affine_mat)\n", + "print('')\n", + "print(f'There are {int(np.sum(brain))} voxels in the included brain mask\\n')\n", + "\n", + "plot_roi(final_mask, bg_img=avg_mask)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89eebb1f-bf55-44f5-b875-6d466182c03f", + "metadata": {}, + "outputs": [], + "source": [ + "# # create union of ses-01 and ses-02 reliability masks and plot against avg_mask \n", + "# rel_masks = []\n", + "# rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-01_to_ses-03.npy'))\n", + "# rel_masks.append(np.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/rel_mask_from_ses-02_to_ses-03.npy'))\n", + "# rel_masks = np.array(rel_masks)\n", + "# for r in rel_masks:\n", + "# assert r.shape[0] == int(final_mask.get_fdata().sum())\n", + "# assert r.dtype == bool\n", + " \n", + "# assert len(rel_masks) == 2 # should be the case if there's 2 training sessions\n", + "# union_mask = np.logical_or(rel_masks[0], rel_masks[1])\n", + "# assert union_mask.sum() > rel_masks[0].sum()\n", + "# assert union_mask.sum() > rel_masks[1].sum()\n", + "# print(f'there are {union_mask.sum()} reliable voxels 
based on the union mask out of {int(final_mask.get_fdata().sum())} voxels in the nsdgeneral roi')\n", + "# print(f'{(union_mask.sum() / int(final_mask.get_fdata().sum())):.2%} of the voxels in the roi were selected')\n", + "# path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy'\n", + "path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_ses-01-02.npy'\n", + "# np.save(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_{session_label}.npy', union_mask)\n", + "# print(f'saved union mask to {path}!')\n", + "union_mask = np.load(path)" + ] + }, + { + "cell_type": "markdown", + "id": "7804edab-5dc2-4499-8a91-91d77f78bd77", + "metadata": {}, + "source": [ + "## Load GLMSingle voxel data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "779861de-8628-49ad-8cec-c14d285d2ea3", + "metadata": {}, + "outputs": [], + "source": [ + "ses_mask = []\n", + "\n", + "for s in ses_list:\n", + " ses_mask_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C/sub-005_{s}_task-C_brain.nii.gz'\n", + " ses_mask.append(nib.load(ses_mask_path))\n", + " \n", + " assert np.all(ses_mask[-1].affine == final_mask.affine)\n", + " assert np.all(ses_mask[-1].shape == final_mask.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d273f33-cb38-4045-9ff0-65edc3d35060", + "metadata": {}, + "outputs": [], + "source": [ + "ses_vox = []\n", + "vox = None\n", + "needs_postprocessing = False\n", + "params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx)\n", + "\n", + "if resample_post_glmsingle == True:\n", + " glm_save_path_resampled = f\"{glmsingle_path}/vox_resampled.nii.gz\"\n", + " if load_from_resampled_file == True:\n", + " # resampling was done in this notebook so we can load from file\n", + " vox = nib.load(glm_save_path_resampled)\n", + " else:\n", + " # do resampling here\n", + " assert os.path.exists(ref_name) and os.path.exists(omat_name), \"need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs\"\n", + " vox = load_preprocess_betas(orig_glmsingle_path, *params)\n", + " vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name)\n", + " needs_postprocessing = True\n", + "\n", + "if vox is None: \n", + " for i, s in enumerate(ses_list):\n", + " # either resampling was done in glmsingle or we aren't resampling \n", + " ses_vox_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C'\n", + " assert os.path.exists(ses_vox_path)\n", + " ses_vox.append(load_preprocess_betas(ses_vox_path, *params))\n", + " v = nilearn.masking.unmask(ses_vox[i], ses_mask[i])\n", + " ses_vox[i] = nilearn.masking.apply_mask(v, final_mask)\n", + " vox = np.concatenate(ses_vox)\n", + " print(\"applied final brain mask\")\n", + " print(vox.shape)\n", + " vox = vox[:, union_mask]\n", + " print(\"applied union roi mask\")\n", + " print(vox.shape)\n", + " \n", + " \n", + "if needs_postprocessing == True:\n", + " vox = apply_mask(vox, avg_mask)\n", + " vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images)\n", + " print(vox.shape)\n", + "\n", + "assert len(vox) == len(image_idx)" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "9d7d1933-b828-4312-bb36-8e6a1fc2394d", + "metadata": {}, + "outputs": [], + "source": [ + "# # get vox into the same shape as the union mask\n", + "# v = nilearn.masking.unmask(vox, ses_mask) # move back to 3D based on own session mask\n", + "# final_mask = nilearn.masking.intersect_masks([avg_mask, roi])\n", + "# vox = nilearn.masking.apply_mask(vox, final_mask) # re-flatten based on final mask so everything is in the same shape now\n", + "# print(vox.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "028998bc-0a34-4820-b68c-e428d2bc4b8a", + "metadata": {}, + "outputs": [], + "source": [ + "pairs_homog = np.array([[p[0], p[1]] for p in pairs])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c087978f-b316-44f4-a2af-c17b5a00764b", + "metadata": {}, + "outputs": [], + "source": [ + "same_corrs = []\n", + "diff_corrs = []\n", + "for isamp, samp in enumerate(vox[pairs_homog]):\n", + " avg_same_img = []\n", + " for i in range(samp.shape[0]):\n", + " for j in range(i, samp.shape[0]):\n", + " if i != j:\n", + " avg_same_img.append(np.array([np.corrcoef(samp[i, :], samp[j, :])[0,1]]))\n", + " \n", + " same_corrs.append(np.mean(avg_same_img))\n", + " \n", + " avg_diff_img = []\n", + " for isamp_j, samp_j in enumerate(vox[pairs_homog]):\n", + " if isamp_j != isamp:\n", + " for i in range(samp_j.shape[0]):\n", + " for j in range(i, samp_j.shape[0]):\n", + " if i != j:\n", + " avg_diff_img.append(np.array([np.corrcoef(samp[i, :], samp_j[j, :])[0,1]]))\n", + " \n", + " # print(len(avg_diff_img))\n", + " diff_corrs.append(np.mean(avg_diff_img))\n", + "\n", + "\n", + "print(len(same_corrs), len(diff_corrs))\n", + "same_corrs = np.array(same_corrs)\n", + "diff_corrs = np.array(diff_corrs)\n", + "\n", + "\n", + "plt.figure(figsize=(5,4))\n", + "plt.title(f\"{sub}_{session} same/diff Pearson corr.\")\n", + "plt.plot(np.sort(same_corrs),c='blue',label='same')\n", + "plt.plot(np.sort(diff_corrs),c='cyan',label='diff')\n", + "plt.axhline(0,c='k',ls='--')\n", + "plt.legend()\n", + "plt.xlabel(\"sample\")\n", + "plt.ylabel(\"Pearson R\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31646431-10ac-4820-ba5d-c35f1e104557", + "metadata": {}, + "outputs": [], + "source": [ + "vox_pairs = utils.zscore(vox[pairs_homog])\n", + "plt.figure(figsize=(5,4))\n", + "plt.title(f\"{sub}_{session} same minus diff difference Pearson corr.\")\n", + "plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference')\n", + "plt.axhline(0,c='k',ls='--')\n", + "plt.legend()\n", + "plt.xlabel(\"sample\")\n", + "plt.ylabel(\"Pearson R\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "a8866ce2-1cf2-459e-aa81-dec26a3dcd33", + "metadata": {}, + "source": [ + "# Training MindEye" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f554db1-f7cd-40d2-ab62-5d1e282c2bc8", + "metadata": {}, + "outputs": [], + "source": [ + "utils.seed_everything(seed)\n", + "\n", + "if train_test_split == 'orig':\n", + " # train = all images except images that were repeated\n", + " # test = average of the same-image presentations\n", + " imageTrain = np.arange(len(images))\n", + " train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])\n", + " test_image_indices = pairs\n", + " print(len(train_image_indices), len(test_image_indices))\n", + " assert len(train_image_indices) + len(test_image_indices) == len(image_idx)\n", + "elif 
train_test_split == 'MST':\n", + " # non-MST images are the train split\n", + " # MST images are the test split\n", + " MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k])\n", + " non_MST_idx = [v for k,v in image_to_indices.items() if 'MST_pairs' not in k]\n", + " non_MST_idx = np.array([z for y in non_MST_idx for x in y for z in x]) # flatten the indices\n", + " train_image_indices = non_MST_idx\n", + " test_image_indices = MST_idx.flatten() # MST_idx contains the mapping for the different test sets; test_image_indices has all MST indices combined\n", + " print(len(train_image_indices), len(test_image_indices))\n", + " assert len(train_image_indices) + len(test_image_indices) == len(vox)\n", + "elif train_test_split == 'unique':\n", + " imageTest = np.arange(len(images))\n", + " train_image_indices = pairs.flatten()\n", + " test_image_indices = np.array([item for item in imageTest if item not in pairs.flatten()])\n", + " print(len(train_image_indices), len(test_image_indices))\n", + " assert len(train_image_indices) + len(test_image_indices) == len(image_idx)\n", + "else:\n", + " raise Exception(\"invalid train_test_split\")\n", + "\n", + "# TODO add assertion that verifies file names in train and test don't overlap, guards against repeats\n", + "\n", + "for i in train_image_indices:\n", + " assert i not in test_image_indices" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "590f2b4b-db7c-42a1-bfd0-cc578e6af988", + "metadata": {}, + "outputs": [], + "source": [ + "ses_split = vox[train_image_indices].shape[0] // 2\n", + "\n", + "train_mean_s1 = np.mean(vox[train_image_indices][:ses_split], axis=0)\n", + "train_std_s1 = np.std(vox[train_image_indices][:ses_split], axis=0)\n", + "train_mean_s2 = np.mean(vox[train_image_indices][ses_split:], axis=0)\n", + "train_std_s2 = np.std(vox[train_image_indices][ses_split:], axis=0)\n", + "\n", + "\n", + "vox[:ses_split] = utils.zscore(vox[:ses_split],train_mean=train_mean_s1,train_std=train_std_s1)\n", + "vox[ses_split:] = utils.zscore(vox[ses_split:],train_mean=train_mean_s2,train_std=train_std_s2)\n", + "\n", + "print(\"voxels have been zscored\")\n", + "print(\"ses-01:\", vox[:ses_split,0].mean(), vox[:ses_split,0].std())\n", + "print(\"ses-02:\", vox[ses_split:,0].mean(), vox[ses_split:,0].std())\n", + "print(\"vox\", vox.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ddf00e76-51ee-4e9e-8e38-c09fba22f103", + "metadata": {}, + "outputs": [], + "source": [ + "# save the mean and std from ses-01 and 02\n", + "train_test_mean_s1 = np.mean(vox[:ses_split], axis=0)\n", + "train_test_std_s1 = np.std(vox[:ses_split], axis=0)\n", + "train_test_mean_s2 = np.mean(vox[ses_split:], axis=0)\n", + "train_test_std_s2 = np.std(vox[ses_split:], axis=0)\n", + "print(train_test_mean_s1.shape)\n", + "assert np.all(train_test_mean_s1.shape == train_test_std_s1.shape)\n", + "assert np.all(train_test_mean_s1.shape == train_test_mean_s2.shape)\n", + "assert np.all(train_test_mean_s1.shape == train_test_std_s2.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea7db416-e12f-4678-bd6e-74040261d0d1", + "metadata": {}, + "outputs": [], + "source": [ + "# for idx in deleted_indices:\n", + "# # check image names to be deleted match\n", + "# original_name = vox_image_dict[idx]\n", + "# matching_indices = [i for i in deleted_indices if vox_image_dict[i] == original_name]\n", + "# assert all(vox_image_dict[i] == original_name for i in matching_indices), \\\n", + "# 
f\"Mismatch in image names for deleted indices {matching_indices}\"\n", + "\n", + "# # check image data to be deleted match\n", + "# base_image = images[matching_indices[0]] # Reference image\n", + "# for i in matching_indices[1:]:\n", + "# assert np.array_equal(base_image, images[i]), \\\n", + "# f\"Mismatch in image data for {vox_image_dict[i]} at index {i}\"\n", + "\n", + "# images = images[kept_indices]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b02840f9-2836-4e64-9c86-ad255da4b2cc", + "metadata": {}, + "outputs": [], + "source": [ + "images = torch.Tensor(images)\n", + "vox = torch.Tensor(vox)\n", + "assert len(images) == len(vox)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc5d2e32-6027-4a19-bef4-5ca068db35bb", + "metadata": {}, + "outputs": [], + "source": [ + "### Multi-GPU config ###\n", + "from accelerate import Accelerator, DeepSpeedPlugin\n", + "\n", + "local_rank = os.getenv('RANK')\n", + "if local_rank is None: \n", + " local_rank = 0\n", + "else:\n", + " local_rank = int(local_rank)\n", + "print(\"LOCAL RANK \", local_rank) \n", + "\n", + "data_type = torch.float32 # change depending on your mixed_precision\n", + "\n", + "accelerator = Accelerator(split_batches=False)\n", + "batch_size = 8 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b767ab6f-d4a9-47a5-b3bf-f56bf6760c0c", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"PID of this process =\",os.getpid())\n", + "device = accelerator.device\n", + "print(\"device:\",device)\n", + "world_size = accelerator.state.num_processes\n", + "distributed = not accelerator.state.distributed_type == 'NO'\n", + "num_devices = torch.cuda.device_count()\n", + "global_batch_size = batch_size * num_devices\n", + "print(\"global_batch_size\", global_batch_size)\n", + "if num_devices==0 or not distributed: num_devices = 1\n", + "num_workers = num_devices\n", + "print(accelerator.state)\n", + "\n", + "# set data_type to match your mixed precision (automatically set based on deepspeed config)\n", + "if accelerator.mixed_precision == \"bf16\":\n", + " data_type = torch.bfloat16\n", + "elif accelerator.mixed_precision == \"fp16\":\n", + " data_type = torch.float16\n", + "else:\n", + " data_type = torch.float32\n", + "\n", + "print(\"distributed =\",distributed, \"num_devices =\", num_devices, \"local rank =\", local_rank, \"world size =\", world_size, \"data_type =\", data_type)\n", + "print = accelerator.print # only print if local_rank=0" + ] + }, + { + "cell_type": "markdown", + "id": "9018b82b-c054-4463-9527-4b0c2a75bda6", + "metadata": { + "tags": [] + }, + "source": [ + "## Configurations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2b61fec7-72a0-4b67-86da-1375f1d9fbd3", + "metadata": {}, + "outputs": [], + "source": [ + "# if running this interactively, can specify jupyter_args here for argparser to use\n", + "if utils.is_interactive():\n", + " model_name = 'testing_MST' # 'sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0'\n", + " print(\"model_name:\", model_name)\n", + " \n", + " # global_batch_size and batch_size should already be defined in the above cells\n", + " # other variables can be specified in the following string:\n", + " # jupyter_args = f\"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name={model_name}\"\n", + "\n", + " jupyter_args = f\"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \\\n", + " --model_name={model_name} \\\n", + " --no-multi_subject --subj=1 
--batch_size={batch_size} \\\n", + " --hidden_dim=1024 --clip_scale=1. \\\n", + " --no-blurry_recon --blur_scale=.5 \\\n", + " --no-use_prior --prior_scale=30 \\\n", + " --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \\\n", + " --ckpt_interval=999 --no-ckpt_saving --new_test \\\n", + " --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep\"\n", + " print(jupyter_args)\n", + " jupyter_args = jupyter_args.split()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2028bdf0-2f41-46d9-b6e7-86b870dbf16c", + "metadata": {}, + "outputs": [], + "source": [ + "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n", + "parser.add_argument(\n", + " \"--model_name\", type=str, default=\"testing\",\n", + " help=\"name of model, used for ckpt saving and wandb logging (if enabled)\",\n", + ")\n", + "parser.add_argument(\n", + " \"--data_path\", type=str, default=\"/weka/proj-fmri/shared/natural-scenes-dataset\",\n", + " help=\"Path to where NSD data is stored / where to download it to\",\n", + ")\n", + "parser.add_argument(\n", + " \"--subj\",type=int, default=1, choices=[1,2,3,4,5,6,7,8],\n", + " help=\"Validate on which subject?\",\n", + ")\n", + "parser.add_argument(\n", + " \"--multisubject_ckpt\", type=str, default=None,\n", + " help=\"Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.\",\n", + ")\n", + "parser.add_argument(\n", + " \"--num_sessions\", type=int, default=0,\n", + " help=\"Number of training sessions to include (if multi_subject, this variable doesnt matter)\",\n", + ")\n", + "parser.add_argument(\n", + " \"--use_prior\",action=argparse.BooleanOptionalAction,default=False,\n", + " help=\"whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)\",\n", + ")\n", + "parser.add_argument(\n", + " \"--batch_size\", type=int, default=32,\n", + " help=\"Batch size can be increased by 10x if only training v2c and not diffusion diffuser\",\n", + ")\n", + "parser.add_argument(\n", + " \"--wandb_log\",action=argparse.BooleanOptionalAction,default=False,\n", + " help=\"whether to log to wandb\",\n", + ")\n", + "parser.add_argument(\n", + " \"--resume_from_ckpt\",action=argparse.BooleanOptionalAction,default=False,\n", + " help=\"if not using wandb and want to resume from a ckpt\",\n", + ")\n", + "parser.add_argument(\n", + " \"--wandb_project\",type=str,default=\"stability\",\n", + " help=\"wandb project name\",\n", + ")\n", + "parser.add_argument(\n", + " \"--mixup_pct\",type=float,default=.33,\n", + " help=\"proportion of way through training when to switch from BiMixCo to SoftCLIP\",\n", + ")\n", + "parser.add_argument(\n", + " \"--low_mem\",action=argparse.BooleanOptionalAction,default=False,\n", + " help=\"whether to preload images to cpu to speed things up but consume more memory\",\n", + ")\n", + "parser.add_argument(\n", + " \"--blurry_recon\",action=argparse.BooleanOptionalAction,default=True,\n", + " help=\"whether to output blurry reconstructions\",\n", + ")\n", + "parser.add_argument(\n", + " \"--blur_scale\",type=float,default=.5,\n", + " help=\"multiply loss from blurry recons by this number\",\n", + ")\n", + "parser.add_argument(\n", + " \"--clip_scale\",type=float,default=1.,\n", + " help=\"multiply contrastive loss by this number\",\n", + ")\n", + "parser.add_argument(\n", + " \"--prior_scale\",type=float,default=30,\n", + " help=\"multiply diffusion prior 
loss by this\",\n", + ")\n", + "parser.add_argument(\n", + " \"--use_image_aug\",action=argparse.BooleanOptionalAction,default=True,\n", + " help=\"whether to use image augmentation\",\n", + ")\n", + "parser.add_argument(\n", + " \"--num_epochs\",type=int,default=120,\n", + " help=\"number of epochs of training\",\n", + ")\n", + "parser.add_argument(\n", + " \"--multi_subject\",action=argparse.BooleanOptionalAction,default=False,\n", + ")\n", + "parser.add_argument(\n", + " \"--new_test\",action=argparse.BooleanOptionalAction,default=True,\n", + ")\n", + "parser.add_argument(\n", + " \"--n_blocks\",type=int,default=2,\n", + ")\n", + "parser.add_argument(\n", + " \"--hidden_dim\",type=int,default=1024,\n", + ")\n", + "parser.add_argument(\n", + " \"--seq_past\",type=int,default=0,\n", + ")\n", + "parser.add_argument(\n", + " \"--seq_future\",type=int,default=0,\n", + ")\n", + "parser.add_argument(\n", + " \"--lr_scheduler_type\",type=str,default='cycle',choices=['cycle','linear'],\n", + ")\n", + "parser.add_argument(\n", + " \"--ckpt_saving\",action=argparse.BooleanOptionalAction,default=True,\n", + ")\n", + "parser.add_argument(\n", + " \"--ckpt_interval\",type=int,default=5,\n", + " help=\"save backup ckpt and reconstruct every x epochs\",\n", + ")\n", + "parser.add_argument(\n", + " \"--seed\",type=int,default=42,\n", + ")\n", + "parser.add_argument(\n", + " \"--max_lr\",type=float,default=3e-4,\n", + ")\n", + "\n", + "if utils.is_interactive():\n", + " args = parser.parse_args(jupyter_args)\n", + "else:\n", + " args = parser.parse_args()\n", + "\n", + "# create global variables without the args prefix\n", + "for attribute_name in vars(args).keys():\n", + " globals()[attribute_name] = getattr(args, attribute_name)\n", + " \n", + "outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')\n", + "if not os.path.exists(outdir) and ckpt_saving:\n", + " os.makedirs(outdir,exist_ok=True)\n", + " \n", + "if use_image_aug or blurry_recon:\n", + " import kornia\n", + " import kornia.augmentation as K\n", + " from kornia.augmentation.container import AugmentationSequential\n", + "if use_image_aug:\n", + " img_augment = AugmentationSequential(\n", + " kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),\n", + " same_on_batch=False,\n", + " data_keys=[\"input\"],\n", + " )\n", + " # Define the blurring augmentations\n", + " blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)\n", + " \n", + "if multi_subject:\n", + " subj_list = np.arange(1,9)\n", + " subj_list = subj_list[subj_list != subj]\n", + "else:\n", + " subj_list = [subj]\n", + "\n", + "print(\"subj_list\", subj_list, \"num_sessions\", num_sessions)" + ] + }, + { + "cell_type": "markdown", + "id": "42d13c25-1369-4c49-81d4-83d713586096", + "metadata": { + "tags": [] + }, + "source": [ + "## Prep data, models, and dataloaders" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a6660667-94bc-4829-8310-227042febd4c", + "metadata": {}, + "outputs": [], + "source": [ + "if ckpt_saving:\n", + " # save MST_ID for 2-alternative forced-choice retrieval evaluation \n", + " if 'MST' in model_name:\n", + " eval_dir = os.environ[\"eval_dir\"]\n", + " print('saving MST info in', eval_dir)\n", + " # Saving ##\n", + " if not os.path.exists(eval_dir):\n", + " os.mkdir(eval_dir)\n", + "\n", + " np.save(f\"{eval_dir}/MST_ID.npy\", MST_ID)\n", + " np.save(f\"{eval_dir}/MST_pairmate_indices.npy\", MST_pairmate_indices)\n", + "\n", 
+ " if remove_random_n:\n", + " np.save(f\"{eval_dir}/imgs_to_remove.npy\", imgs_to_remove)\n", + "\n", + " np.save(f\"{eval_dir}/train_image_indices.npy\", train_image_indices)\n", + " np.save(f\"{eval_dir}/test_image_indices.npy\", test_image_indices)\n", + " np.save(f\"{eval_dir}/images.npy\", images)\n", + " np.save(f\"{eval_dir}/vox.npy\", vox)\n", + " \n", + " np.save(f'{eval_dir}/train_test_mean_s1.npy', train_test_mean_s1)\n", + " np.save(f'{eval_dir}/train_test_std_s1.npy', train_test_std_s1)\n", + " np.save(f'{eval_dir}/train_test_mean_s2.npy', train_test_mean_s2)\n", + " np.save(f'{eval_dir}/train_test_std_s2.npy', train_test_std_s2)" + ] + }, + { + "cell_type": "markdown", + "id": "1c023f24-5233-4a15-a2f5-78487b3a8546", + "metadata": {}, + "source": [ + "### Creating wds dataloader, preload betas and all 73k possible images" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aefe7c27-ab39-4b2c-90f4-480f4087b7ab", + "metadata": {}, + "outputs": [], + "source": [ + "def my_split_by_node(urls): return urls\n", + "num_voxels_list = []\n", + "\n", + "if multi_subject:\n", + " nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])\n", + " num_samples_per_epoch = (750*40) // num_devices \n", + "else:\n", + " # num_samples_per_epoch = (750*num_sessions) // num_devices \n", + " num_samples_per_epoch = len(train_image_indices)\n", + "\n", + "print(\"dividing batch size by subj_list, which will then be concatenated across subj during training...\") \n", + "batch_size = batch_size // len(subj_list)\n", + "\n", + "num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))\n", + "\n", + "print(\"batch_size =\", batch_size, \"num_iterations_per_epoch =\",num_iterations_per_epoch, \"num_samples_per_epoch =\",num_samples_per_epoch)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1942b0e-1223-40e6-b543-2f7ff2e8ebcd", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "train_data = {}\n", + "train_dl = {}\n", + "\n", + "train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))\n", + "test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81084834-035f-4465-ad59-59e6b806a2f5", + "metadata": {}, + "outputs": [], + "source": [ + "num_voxels = {}\n", + "voxels = {}\n", + "for s in subj_list:\n", + " print(f\"Training with {num_sessions} sessions\")\n", + " train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)\n", + "\n", + " num_voxels_list.append(vox[0].shape[-1])\n", + " num_voxels[f'subj0{s}'] = vox[0].shape[-1]\n", + " voxels[f'subj0{s}'] = vox\n", + " print(f\"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}\")\n", + "\n", + "print(\"Loaded all subj train dls and vox!\\n\")\n", + "\n", + "# Validate only on one subject\n", + "if multi_subject: \n", + " subj = subj_list[0] # cant validate on the actual held out person so picking first in subj_list\n", + "test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True)\n", + "\n", + "print(f\"Loaded test dl for subj{subj}!\\n\")" + ] + }, + { + "cell_type": "markdown", + "id": "10ec4517-dbdf-4ece-98f6-4714d5de4e15", + "metadata": {}, + "source": [ + "## Load models" + ] + }, + { + "cell_type": "markdown", + "id": "48d6160e-1ee8-4da7-a755-9dbb452a6fa5", + "metadata": {}, + "source": [ + "### CLIP image 
embeddings model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0420dc0-199e-4c1a-857d-b1747058b467", + "metadata": {}, + "outputs": [], + "source": [ + "## USING OpenCLIP ViT-bigG ###\n", + "sys.path.append('generative_models/')\n", + "import sgm\n", + "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder\n", + "# from generative_models.sgm.models.diffusion import DiffusionEngine\n", + "# from omegaconf import OmegaConf\n", + "\n", + "try:\n", + " print(clip_img_embedder)\n", + "except:\n", + " clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n", + " arch=\"ViT-bigG-14\",\n", + " version=\"laion2b_s39b_b160k\",\n", + " output_tokens=True,\n", + " only_tokens=True,\n", + " )\n", + " clip_img_embedder.to(device)\n", + "clip_seq_dim = 256\n", + "clip_emb_dim = 1664\n", + "\n", + "# ## USING OPEN AI CLIP ViT-L ###\n", + "# import clip\n", + "# try:\n", + "# print(clip_model)\n", + "# except:\n", + "# clip_model, preprocess = clip.load(\"ViT-L/14\", device=device)\n", + "# preprocess = transforms.Compose([\n", + "# transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),\n", + "# transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n", + "# std=[0.26862954, 0.26130258, 0.27577711]),\n", + "# ])\n", + "# def clip_img_embedder(image):\n", + "# preproc_img = preprocess(image)\n", + "# return clip_model.encode_image(preproc_img)\n", + "# clip_seq_dim = 1\n", + "# clip_emb_dim = 768" + ] + }, + { + "cell_type": "markdown", + "id": "260e5e4a-f697-4b2c-88fc-01f6a54886c0", + "metadata": {}, + "source": [ + "### MindEye modules" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22850c04-3811-4d52-ad2f-48c7be1366e8", + "metadata": {}, + "outputs": [], + "source": [ + "model = utils.prepare_model_and_training(\n", + " num_voxels_list=num_voxels_list,\n", + " n_blocks=n_blocks,\n", + " hidden_dim=hidden_dim,\n", + " clip_emb_dim=clip_emb_dim,\n", + " clip_seq_dim=clip_seq_dim,\n", + " use_prior=use_prior,\n", + " clip_scale=clip_scale\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "038a5d61-4769-40b9-a004-f4e7b5b38bb0", + "metadata": {}, + "outputs": [], + "source": [ + "# test on subject 1 with fake data\n", + "b = torch.randn((2,1,num_voxels_list[0]))\n", + "print(b.shape, model.ridge(b,0).shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b8de65a-6d3b-4248-bea9-9b6f4d562321", + "metadata": {}, + "outputs": [], + "source": [ + "# test that the model works on some fake data\n", + "b = torch.randn((2,1,hidden_dim))\n", + "print(\"b.shape\",b.shape)\n", + "\n", + "backbone_, clip_, blur_ = model.backbone(b)\n", + "print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)" + ] + }, + { + "cell_type": "markdown", + "id": "b397c0d7-52a3-4153-823b-c27d2eb3eeba", + "metadata": {}, + "source": [ + "### Adding diffusion prior + unCLIP if use_prior=True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "69965344-9346-4592-9cc5-e537e31d5fce", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "if use_prior:\n", + " from models import *\n", + "\n", + " # setup diffusion prior network\n", + " out_dim = clip_emb_dim\n", + " depth = 6\n", + " dim_head = 52\n", + " heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim\n", + " timesteps = 100\n", + "\n", + " prior_network = VersatileDiffusionPriorNetwork(\n", + " dim=out_dim,\n", + " depth=depth,\n", + " dim_head=dim_head,\n", + " 
heads=heads,\n", + " causal=False,\n", + " num_tokens = clip_seq_dim,\n", + " learned_query_mode=\"pos_emb\"\n", + " )\n", + "\n", + " model.diffusion_prior = BrainDiffusionPrior(\n", + " net=prior_network,\n", + " image_embed_dim=out_dim,\n", + " condition_on_text_encodings=False,\n", + " timesteps=timesteps,\n", + " cond_drop_prob=0.2,\n", + " image_embed_scale=None,\n", + " )\n", + " \n", + " utils.count_params(model.diffusion_prior)\n", + " utils.count_params(model)" + ] + }, + { + "cell_type": "markdown", + "id": "ec25271a-2209-400c-8026-df3b8ddc1eef", + "metadata": {}, + "source": [ + "### Setup optimizer / lr / ckpt saving" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e14d0482-dc42-43b9-9ce1-953c32f2c9c1", + "metadata": {}, + "outputs": [], + "source": [ + "no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n", + "\n", + "opt_grouped_parameters = [\n", + " {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},\n", + " {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},\n", + " {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},\n", + "]\n", + "# model.backbone.requires_grad_(False)\n", + "\n", + "if use_prior:\n", + " opt_grouped_parameters.extend([\n", + " {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},\n", + " {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n", + " ])\n", + "\n", + "optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)\n", + "\n", + "if lr_scheduler_type == 'linear':\n", + " lr_scheduler = torch.optim.lr_scheduler.LinearLR(\n", + " optimizer,\n", + " total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),\n", + " last_epoch=-1\n", + " )\n", + "elif lr_scheduler_type == 'cycle':\n", + " if num_iterations_per_epoch==0:\n", + " num_iterations_per_epoch=1\n", + " total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))\n", + " print(\"total_steps\", total_steps)\n", + " lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(\n", + " optimizer, \n", + " max_lr=max_lr,\n", + " total_steps=total_steps,\n", + " final_div_factor=1000,\n", + " last_epoch=-1, pct_start=2/num_epochs\n", + " )\n", + " \n", + "def save_ckpt(tag):\n", + " ckpt_path = outdir+f'/{tag}.pth'\n", + " if accelerator.is_main_process:\n", + " unwrapped_model = accelerator.unwrap_model(model)\n", + " torch.save({\n", + " 'epoch': epoch,\n", + " 'model_state_dict': unwrapped_model.state_dict(),\n", + " 'optimizer_state_dict': optimizer.state_dict(),\n", + " 'lr_scheduler': lr_scheduler.state_dict(),\n", + " 'train_losses': losses,\n", + " 'test_losses': test_losses,\n", + " 'lrs': lrs,\n", + " }, ckpt_path)\n", + " print(f\"\\n---saved {outdir}/{tag} ckpt!---\\n\")\n", + "\n", + "def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False): \n", + " print(f\"\\n---loading {outdir}/{tag}.pth ckpt---\\n\")\n", + " checkpoint = torch.load(outdir+'/last.pth', map_location='cpu')\n", + " state_dict = checkpoint['model_state_dict']\n", + " if multisubj_loading: # remove incompatible ridge layer that will otherwise error\n", + " state_dict.pop('ridge.linears.0.weight',None)\n", + " model.load_state_dict(state_dict, strict=strict)\n", + " if load_epoch:\n", + " globals()[\"epoch\"] = 
checkpoint['epoch']\n", + " print(\"Epoch\",epoch)\n", + " if load_optimizer:\n", + " optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n", + " if load_lr:\n", + " lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n", + " del checkpoint\n", + "\n", + "print(\"\\nDone with model preparations!\")\n", + "num_params = utils.count_params(model)" + ] + }, + { + "cell_type": "markdown", + "id": "b1e8dcc4-5ce2-4206-88dc-a68d1dd701cd", + "metadata": {}, + "source": [ + "# Wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "990cce8c-df83-473a-93c8-c47ba355eccd", + "metadata": {}, + "outputs": [], + "source": [ + "if local_rank==0 and wandb_log: # only use main process for wandb logging\n", + " import wandb\n", + " import time\n", + " \n", + " wandb_project = 'rtmindeye'\n", + " print(f\"wandb {wandb_project} run {model_name}\")\n", + "\n", + " # Need to configure wandb beforehand in terminal with \"wandb init\"!\n", + " wandb_config = {\n", + " \"model_name\": model_name,\n", + " \"global_batch_size\": global_batch_size,\n", + " \"batch_size\": batch_size,\n", + " \"num_epochs\": num_epochs,\n", + " \"num_sessions\": num_sessions,\n", + " \"num_params\": num_params,\n", + " \"clip_scale\": clip_scale,\n", + " \"prior_scale\": prior_scale,\n", + " \"blur_scale\": blur_scale,\n", + " \"use_image_aug\": use_image_aug,\n", + " \"max_lr\": max_lr,\n", + " \"mixup_pct\": mixup_pct,\n", + " \"num_samples_per_epoch\": num_samples_per_epoch,\n", + " \"ckpt_interval\": ckpt_interval,\n", + " \"ckpt_saving\": ckpt_saving,\n", + " \"seed\": seed, # SLURM array task ID\n", + " \"distributed\": distributed,\n", + " \"num_devices\": num_devices,\n", + " \"world_size\": world_size,\n", + " }\n", + " print(\"wandb_config:\\n\", wandb_config)\n", + " print(\"wandb_id:\", model_name)\n", + "\n", + " # Initialize wandb\n", + " wandb.init(\n", + " id=model_name,\n", + " project=wandb_project,\n", + " name=model_name,\n", + " config=wandb_config,\n", + " resume=\"allow\",\n", + " save_code=True,\n", + " )\n", + "\n", + " # Get SLURM job & array ID\n", + " slurm_job_id = utils.get_slurm_job()\n", + " slurm_array_id = seed # seed corresponds to SLURM_ARRAY_TASK_ID\n", + "\n", + " # Define SLURM log paths\n", + " log_dir = \"slurms\"\n", + " log_files = [\n", + " f\"{log_dir}/{slurm_job_id}_{slurm_array_id}.out\",\n", + " f\"{log_dir}/{slurm_job_id}_{slurm_array_id}.err\",\n", + " ]\n", + "\n", + " # Ensure logs exist before logging them\n", + " for log_file in log_files:\n", + " wait_time = 0\n", + " while not os.path.exists(log_file) and wait_time < 60: # Wait max 60s\n", + " time.sleep(5)\n", + " wait_time += 5\n", + "\n", + " # Log SLURM logs as artifacts\n", + " artifact = wandb.Artifact(f\"slurm_logs_{slurm_job_id}_{slurm_array_id}\", type=\"logs\")\n", + " for log_file in log_files:\n", + " if os.path.exists(log_file):\n", + " artifact.add_file(log_file)\n", + "\n", + " wandb.log_artifact(artifact)\n", + "else:\n", + " wandb_log = False" + ] + }, + { + "cell_type": "markdown", + "id": "d5690151-2131-4918-b750-e869cbd1a8a8", + "metadata": {}, + "source": [ + "# Train the model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12de6387-6e18-4e4b-b5ce-a847d625330a", + "metadata": {}, + "outputs": [], + "source": [ + "epoch = 0\n", + "losses, test_losses, lrs = [], [], []\n", + "best_test_loss = 1e9\n", + "torch.cuda.empty_cache()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "607a7c7b-fe5e-41a4-80bf-d2814b3a57cc", + 
"metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# load multisubject stage1 ckpt if set\n", + "if multisubject_ckpt is not None and not resume_from_ckpt:\n", + " load_ckpt(\"last\",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00ea5ae0-5c92-4276-af5b-25a17ba4dc17", + "metadata": {}, + "outputs": [], + "source": [ + "# checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')\n", + "# state_dict = checkpoint['model_state_dict']\n", + "# model.load_state_dict(state_dict, strict=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99f09f76-4481-4133-b09a-a22b10dbc0c4", + "metadata": {}, + "outputs": [], + "source": [ + "# train_dls = [train_dl[f'subj0{s}'] for s in subj_list]\n", + "\n", + "model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)\n", + "# leaving out test_dl since we will only have local_rank 0 device do evals" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60be0d5f-3e94-4612-9373-61b53d836393", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "print(f\"{model_name} starting with epoch {epoch} / {num_epochs}\")\n", + "progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0))\n", + "test_image, test_voxel = None, None\n", + "mse = nn.MSELoss()\n", + "l1 = nn.L1Loss()\n", + "soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs))\n", + "skip_train = True if epoch>=(num_epochs-1) else False # skip training if you are resuming from a fully trained model\n", + "\n", + "for epoch in progress_bar:\n", + " model.train()\n", + "\n", + " fwd_percent_correct = 0.\n", + " bwd_percent_correct = 0.\n", + " test_fwd_percent_correct = 0.\n", + " test_bwd_percent_correct = 0.\n", + " \n", + " recon_cossim = 0.\n", + " test_recon_cossim = 0.\n", + " recon_mse = 0.\n", + " test_recon_mse = 0.\n", + "\n", + " loss_clip_total = 0.\n", + " loss_blurry_total = 0.\n", + " loss_blurry_cont_total = 0.\n", + " test_loss_clip_total = 0.\n", + " \n", + " loss_prior_total = 0.\n", + " test_loss_prior_total = 0.\n", + "\n", + " blurry_pixcorr = 0.\n", + " test_blurry_pixcorr = 0. 
\n", + "\n", + " # you now have voxel_iters and image_iters with num_iterations_per_epoch batches each\n", + " for train_i, behav in enumerate(train_dl): \n", + " with torch.cuda.amp.autocast(dtype=data_type):\n", + " optimizer.zero_grad()\n", + " loss = 0.\n", + " \n", + " behav = behav[0]\n", + "\n", + " image = images[behav.long().cpu()].to(device)\n", + " voxel = vox[behav.long().cpu()]\n", + " # voxel = (voxel - train_mean) / train_std\n", + " voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n", + "\n", + " if use_image_aug: \n", + " image = img_augment(image)\n", + "\n", + " clip_target = clip_img_embedder(image)\n", + " assert not torch.any(torch.isnan(clip_target))\n", + "\n", + " if epoch < int(mixup_pct * num_epochs):\n", + " voxel, perm, betas, select = utils.mixco(voxel)\n", + "\n", + " voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)]\n", + " # voxel_ridge = torch.cat(voxel_ridge_list, dim=0)\n", + "\n", + " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n", + "\n", + " if clip_scale>0:\n", + " clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n", + " clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)\n", + "\n", + " if use_prior:\n", + " loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target)\n", + " loss_prior_total += loss_prior.item()\n", + " loss_prior *= prior_scale\n", + " loss += loss_prior\n", + "\n", + " recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item()\n", + " recon_mse += mse(prior_out, clip_target).item()\n", + "\n", + " if clip_scale>0:\n", + " if epoch < int(mixup_pct * num_epochs): \n", + " loss_clip = utils.mixco_nce(\n", + " clip_voxels_norm,\n", + " clip_target_norm,\n", + " temp=.006,\n", + " perm=perm, betas=betas, select=select)\n", + " else:\n", + " epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)]\n", + " loss_clip = utils.soft_clip_loss(\n", + " clip_voxels_norm,\n", + " clip_target_norm,\n", + " temp=epoch_temp)\n", + "\n", + " loss_clip_total += loss_clip.item()\n", + " loss_clip *= clip_scale\n", + " loss += loss_clip\n", + "\n", + " if blurry_recon: \n", + " image_enc_pred, transformer_feats = blurry_image_enc_\n", + "\n", + " image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215\n", + " loss_blurry = l1(image_enc_pred, image_enc)\n", + " loss_blurry_total += loss_blurry.item()\n", + "\n", + " if epoch < int(mixup_pct * num_epochs):\n", + " image_enc_shuf = image_enc[perm]\n", + " betas_shape = [-1] + [1]*(len(image_enc.shape)-1)\n", + " image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \\\n", + " image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)\n", + "\n", + " image_norm = (image - mean)/std\n", + " image_aug = (blur_augs(image) - mean)/std\n", + " _, cnx_embeds = cnx(image_norm)\n", + " _, cnx_aug_embeds = cnx(image_aug)\n", + "\n", + " cont_loss = utils.soft_cont_loss(\n", + " nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),\n", + " nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),\n", + " nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),\n", + " temp=0.2)\n", + " loss_blurry_cont_total += cont_loss.item()\n", + "\n", + " loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215\n", + "\n", + " if clip_scale>0:\n", + " # forward and backward top 1 accuracy \n", + " labels = 
torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) \n", + " fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()\n", + " bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()\n", + "\n", + " if blurry_recon:\n", + " with torch.no_grad():\n", + " # only doing pixcorr eval on a subset of the samples per batch because its costly & slow to compute autoenc.decode()\n", + " random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)\n", + " blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1)\n", + " pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)\n", + " blurry_pixcorr += pixcorr.item()\n", + " \n", + " utils.check_loss(loss)\n", + " accelerator.backward(loss)\n", + " optimizer.step()\n", + "\n", + " losses.append(loss.item())\n", + " lrs.append(optimizer.param_groups[0]['lr'])\n", + "\n", + " if lr_scheduler_type is not None:\n", + " lr_scheduler.step()\n", + " \n", + " if train_i >= num_iterations_per_epoch-1:\n", + " break\n", + " \n", + " model.eval()\n", + " logs = {}\n", + "\n", + " if local_rank == 0:\n", + " with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):\n", + " for i in range(2):\n", + " for j in range(2):\n", + " subset_indices = MST_idx[:, i, j].reshape(-1)\n", + " subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))\n", + " subset_dl = torch.utils.data.DataLoader(\n", + " subset_dataset, batch_size=len(MST_idx), shuffle=False,\n", + " drop_last=True, pin_memory=True\n", + " )\n", + "\n", + " # Reset metrics for this subset\n", + " test_losses = []\n", + " test_loss_clip_total = 0\n", + " test_loss_prior_total = 0\n", + " test_blurry_pixcorr = 0\n", + " test_fwd_percent_correct = 0\n", + " test_bwd_percent_correct = 0\n", + " test_recon_cossim = 0\n", + " test_recon_mse = 0\n", + "\n", + " for test_i, behav in enumerate(subset_dl):\n", + " behav = behav[0]\n", + " loss = 0.\n", + "\n", + " if behav.ndim > 1:\n", + " image = images[behav[:, 0].long().cpu()].to(device)\n", + " voxel = vox[behav.long().cpu()].mean(1)\n", + " else:\n", + " image = images[behav.long().cpu()].to(device)\n", + " voxel = vox[behav.long().cpu()]\n", + "\n", + " voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n", + "\n", + " clip_img_embedder = clip_img_embedder.to(device)\n", + " clip_target = clip_img_embedder(image.float())\n", + "\n", + " voxel_ridge = model.ridge(voxel, 0)\n", + " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n", + "\n", + " if clip_scale > 0:\n", + " clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n", + " clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)\n", + "\n", + " random_samps = np.random.choice(np.arange(len(image)), size=len(image) // 5, replace=False)\n", + "\n", + " if use_prior:\n", + " loss_prior, contaminated_prior_out = model.diffusion_prior(\n", + " text_embed=backbone[random_samps], image_embed=clip_target[random_samps])\n", + " test_loss_prior_total += loss_prior.item()\n", + " loss_prior *= prior_scale\n", + " loss += loss_prior\n", + "\n", + " if clip_scale > 0:\n", + " loss_clip = utils.soft_clip_loss(\n", + " clip_voxels_norm,\n", + " clip_target_norm,\n", + " temp=0.006\n", + " )\n", + " test_loss_clip_total += loss_clip.item()\n", + " loss_clip *= clip_scale\n", + " loss += loss_clip\n", + 
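"\n",
+    "            # note: pixcorr below is only computed on a random 1/5 of the batch (random_samps)\n",
+    "            # because autoenc.decode() is costly, so it is a noisy per-batch estimate\n",
+ 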
"\n", + " if blurry_recon:\n", + " image_enc_pred, _ = blurry_image_enc_\n", + " blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps] / 0.18215).sample / 2 + 0.5).clamp(0, 1)\n", + " pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)\n", + " test_blurry_pixcorr += pixcorr.item()\n", + "\n", + " if clip_scale > 0:\n", + " labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)\n", + " test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()\n", + " test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()\n", + "\n", + " utils.check_loss(loss)\n", + " test_losses.append(loss.item())\n", + "\n", + " logs.update({\n", + " f\"subset_{i}_{j}_test/loss\": np.mean(test_losses),\n", + " f\"subset_{i}_{j}_test/loss_clip_total\": test_loss_clip_total / (test_i + 1),\n", + " f\"subset_{i}_{j}_test/loss_prior\": test_loss_prior_total / (test_i + 1),\n", + " f\"subset_{i}_{j}_test/blurry_pixcorr\": test_blurry_pixcorr / (test_i + 1),\n", + " f\"subset_{i}_{j}_test/fwd_pct_correct\": test_fwd_percent_correct / (test_i + 1),\n", + " f\"subset_{i}_{j}_test/bwd_pct_correct\": test_bwd_percent_correct / (test_i + 1),\n", + " })\n", + " print(f\"--- Subset ({i},{j}) ---\")\n", + " for k, v in logs.items():\n", + " if f\"subset_{i}_{j}\" in k:\n", + " print(f\"{k}: {v:.4f}\")\n", + "\n", + " # After subset loop: add train (and global test, if you want) metrics\n", + " logs.update({\n", + " \"train/loss\": np.mean(losses[-(train_i+1):]),\n", + " \"train/lr\": lrs[-1],\n", + " \"train/num_steps\": len(losses),\n", + " \"train/fwd_pct_correct\": fwd_percent_correct / (train_i + 1),\n", + " \"train/bwd_pct_correct\": bwd_percent_correct / (train_i + 1),\n", + " \"train/loss_clip_total\": loss_clip_total / (train_i + 1),\n", + " \"train/loss_blurry_total\": loss_blurry_total / (train_i + 1),\n", + " \"train/loss_blurry_cont_total\": loss_blurry_cont_total / (train_i + 1),\n", + " \"train/blurry_pixcorr\": blurry_pixcorr / (train_i + 1),\n", + " \"train/recon_cossim\": recon_cossim / (train_i + 1),\n", + " \"train/recon_mse\": recon_mse / (train_i + 1),\n", + " \"train/loss_prior\": loss_prior_total / (train_i + 1),\n", + " })\n", + "\n", + "\n", + " # if finished training, save jpg recons if they exist\n", + " if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0):\n", + " if blurry_recon: \n", + " image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215\n", + " # transform blurry recon latents to images and plot it\n", + " fig, axes = plt.subplots(1, 8, figsize=(10, 4))\n", + " jj=-1\n", + " for j in [0,1,2,3]:\n", + " jj+=1\n", + " axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))\n", + " axes[jj].axis('off')\n", + " jj+=1\n", + " axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))\n", + " axes[jj].axis('off')\n", + " plt.show()\n", + "\n", + " progress_bar.set_postfix(**logs)\n", + "\n", + " if wandb_log: wandb.log(logs)\n", + " \n", + " # Save model checkpoint and reconstruct\n", + " if (ckpt_saving) and (epoch % ckpt_interval == 0):\n", + " save_ckpt(f'last')\n", + "\n", + " # wait for other GPUs to catch up if needed\n", + " accelerator.wait_for_everyone()\n", + " torch.cuda.empty_cache()\n", + "\n", + "print(\"\\n===Finished!===\\n\")\n", + "if ckpt_saving:\n", + " save_ckpt(f'last')" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19bdef2b-02cf-40cc-8c0c-84b8b8acdd36", + "metadata": {}, + "outputs": [], + "source": [ + "len(test_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5702acf6-45fe-44f5-8842-c0e2d4d8e8ce", + "metadata": {}, + "outputs": [], + "source": [ + "# # Track metrics here:\n", + "# https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999" + ] + }, + { + "cell_type": "markdown", + "id": "23a54acc-1dce-4de4-9d5f-d0582f5097c5", + "metadata": {}, + "source": [ + "**To tell if the model is working I'm looking at test_bwd/fwd_pct_correct and seeing if that is doing better than chance (1/batch_size)**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3af2dfd7-638f-4932-b384-67d582f88c2c", + "metadata": {}, + "outputs": [], + "source": [ + "# MST_pairmate_names" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b5a7e752-6665-41fb-b3cc-022d36893de7", + "metadata": {}, + "outputs": [], + "source": [ + "x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')]\n", + "assert len(image_idx) == len(x)\n", + "pairs = []\n", + "for i, p in enumerate(MST_pairmate_names):\n", + " assert p[0] != p[1] # no duplicate images\n", + " pairs.append([utils.find_all_indices(x,p[0]), utils.find_all_indices(x,p[1])])\n", + " \n", + "pairs = np.array(pairs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4bda00d8-84b9-4e37-b4bc-aa5576e1586c", + "metadata": {}, + "outputs": [], + "source": [ + "pairs.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34cfada9", + "metadata": {}, + "outputs": [], + "source": [ + "model.eval()\n", + "logs = {}\n", + "if local_rank == 0:\n", + " with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):\n", + " for i in range(2):\n", + " for j in range(2):\n", + " subset_indices = MST_idx[:, i, j].reshape(-1)\n", + " subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices))\n", + " subset_dl = torch.utils.data.DataLoader(\n", + " subset_dataset, batch_size=len(MST_idx), shuffle=False,\n", + " drop_last=True, pin_memory=True\n", + " )\n", + "\n", + " # Reset metrics for this subset\n", + " test_fwd_percent_correct = 0\n", + " test_bwd_percent_correct = 0\n", + "\n", + " for test_i, behav in enumerate(subset_dl):\n", + " behav = behav[0]\n", + " loss = 0.\n", + " image = images[behav.long().cpu()].to(device)\n", + " voxel = vox[behav.long().cpu()]\n", + " voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n", + " clip_img_embedder = clip_img_embedder.to(device)\n", + " clip_target = clip_img_embedder(image.float())\n", + "\n", + " voxel_ridge = model.ridge(voxel, 0)\n", + " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n", + "\n", + " clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1)\n", + " clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1)\n", + "\n", + " if clip_scale > 0:\n", + " labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device)\n", + " test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()\n", + " test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()\n", + " print(test_fwd_percent_correct)\n", + " print(test_bwd_percent_correct)\n", + " logs.update({\n", + " 
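# per-subset top-1 retrieval accuracy, averaged over that subset's batches;\n",
+    "                # chance level is 1/batch_size (batch_size = len(MST_idx) here)\n",
+    "                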
f\"subset_{i}_{j}_test/fwd_pct_correct\": test_fwd_percent_correct / (test_i + 1),\n", + " f\"subset_{i}_{j}_test/bwd_pct_correct\": test_bwd_percent_correct / (test_i + 1),\n", + " })\n", + "\n", + " print(\"--- Full Dataset Evaluation ---\")\n", + " for k, v in logs.items():\n", + " print(f\"{k}: {v:.4f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d1501cd-0473-4faa-8bfc-b2e2472559ae", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# if sub==\"sub-002\":\n", + "# unique_images_pairs = [\n", + "# (2,3),(4,5),(7,8),(15,16),\n", + "# (483, 484), (485, 486), (487, 488), (491, 492), (495, 496), (499, 500), (501, 502),\n", + "# (503, 504), (512, 513), \n", + "# ]\n", + "# elif sub != 'sub-001' and session != 'ses-05':\n", + "# unique_images_pairs = [\n", + "# (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),\n", + "# (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),\n", + "# (31,32),(33,34),(35,36),\n", + "# (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),\n", + "# (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),\n", + "# (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),\n", + "# (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),\n", + "# (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),\n", + "# (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),\n", + "# (847, 848), (849, 850)\n", + "# ]\n", + "# else:\n", + "# # unique_images = unique_images[unique_images!='blank.jpg'][:50]\n", + "# unique_images_pairs = find_mst_pairs(x)\n", + "# # unique_images[unique_images_pairs]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f12f93b0", + "metadata": {}, + "outputs": [], + "source": [ + "import pdb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3215e72f-8a91-4761-9223-5d6990ddcdb3", + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_mst_pairs(mst_pairs):\n", + " with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type):\n", + " failed_A = []\n", + " failed_B = []\n", + " failed_non_corr = []\n", + "\n", + " # Get all unique image indices\n", + " all_indices = np.unique(mst_pairs.flatten())\n", + " \n", + " # Pre-load all images and betas to device\n", + " all_images = images[image_idx[all_indices]].to(device)\n", + " all_voxels = torch.Tensor(vox[image_idx[all_indices]]).unsqueeze(1).to(device)\n", + " \n", + " # Get CLIP embeddings for all images\n", + " all_clip_targets = clip_img_embedder(all_images.float())\n", + " all_clip_targets_norm = nn.functional.normalize(all_clip_targets.flatten(1), dim=-1)\n", + " \n", + " # Pass all betas through model to get MindEye embeddings\n", + " all_voxel_ridge = model.ridge(all_voxels, 0)\n", + " _, all_clip_voxels, _ = model.backbone(all_voxel_ridge)\n", + " all_clip_voxels_norm = nn.functional.normalize(all_clip_voxels.flatten(1), dim=-1)\n", + " \n", + " # Dict mapping idx (which indexes the \"vox\" and \"images\" tensors) to pos (their position in the flattened array \"all_indices\")\n", + " idx_to_pos = {idx: pos for pos, idx in enumerate(all_indices)}\n", + " \n", + " # Initialize scores\n", + " corr_score = 0\n", + " non_corr_score = 0\n", + " corr_total = len(mst_pairs) * 2\n", + " non_corr_total = len(mst_pairs) * (len(mst_pairs)-1) * 4 # number of elements in the matrix excluding the diagonal is n*(n-1)*4 since we're doing this twice each for pairmate A and B\n", + "\n", + " \n", + " # Pre-load voxelwise beta-based embeddings from MindEye and CLIP image 
embeddings\n", + " idxA = np.array([pair[0] for pair in mst_pairs])\n", + " idxB = np.array([pair[1] for pair in mst_pairs])\n", + " \n", + " posA = np.array([idx_to_pos[idx] for idx in idxA])\n", + " posB = np.array([idx_to_pos[idx] for idx in idxB])\n", + " \n", + " voxA_embeddings = all_clip_voxels_norm[posA]\n", + " voxB_embeddings = all_clip_voxels_norm[posB]\n", + " imgA_embeddings = all_clip_targets_norm[posA]\n", + " imgB_embeddings = all_clip_targets_norm[posB]\n", + " \n", + " simA_A = utils.batchwise_cosine_similarity(voxA_embeddings, imgA_embeddings)\n", + " simA_B = utils.batchwise_cosine_similarity(voxA_embeddings, imgB_embeddings)\n", + " simB_B = utils.batchwise_cosine_similarity(voxB_embeddings, imgB_embeddings)\n", + " simB_A = utils.batchwise_cosine_similarity(voxB_embeddings, imgA_embeddings)\n", + "\n", + " \n", + " # corresponding 2-AFC\n", + " # is the voxel embedding for image 1 pairmate A more similar to the CLIP embedding for image 1 pairmate A or the CLIP embedding for image 1 pairmate B?\n", + " correct_A = torch.diag(simA_A) > torch.diag(simA_B)\n", + " # is the voxel embedding for image 1 pairmate B more similar to the CLIP embedding for image 1 pairmate B or the CLIP embedding for image 1 pairmate A?\n", + " correct_B = torch.diag(simB_B) > torch.diag(simB_A)\n", + "\n", + " corr_score += correct_A.sum().item()\n", + " corr_score += correct_B.sum().item()\n", + "\n", + " # Store indices where AFC fails\n", + " failed_A = [i for i, correct in enumerate(correct_A.cpu()) if not correct]\n", + " failed_B = [i for i, correct in enumerate(correct_B.cpu()) if not correct]\n", + " \n", + " # non-corresponding 2-AFC\n", + " N = len(mst_pairs) \n", + " # Create a mask that is True for all off-diagonal elements\n", + " row_idx = torch.arange(N).unsqueeze(1) # (N, 1)\n", + " col_idx = torch.arange(N).unsqueeze(0) # (1, N)\n", + " off_diag_mask = row_idx != col_idx # shape (N, N)\n", + " \n", + " diagA_A = simA_A.diag().unsqueeze(1).expand(-1, N) # Get diagonal values and expand to (N, N) by duplicating the diagonal element along the rows (since each row is the cosine similarity between a single voxel embedding and all CLIP embeddings)\n", + " diagB_B = simB_B.diag().unsqueeze(1).expand(-1, N)\n", + " \n", + " # pdb.set_trace()\n", + "\n", + " # Compare each element in the row to the diagonal element\n", + " off_diag_mask_device = off_diag_mask.to(device)\n", + "\n", + " fail_AA = (simA_A < diagA_A) & off_diag_mask_device\n", + " fail_AB = (simA_B < diagA_A) & off_diag_mask_device\n", + " fail_BB = (simB_B < diagB_B) & off_diag_mask_device\n", + " fail_BA = (simB_A < diagB_B) & off_diag_mask_device\n", + "\n", + " non_corr_score += fail_AA.sum().item()\n", + " non_corr_score += fail_AB.sum().item()\n", + " non_corr_score += fail_BB.sum().item()\n", + " non_corr_score += fail_BA.sum().item()\n", + "\n", + " # Log failed indices\n", + " fail_sources = [fail_AA, fail_AB, fail_BB, fail_BA]\n", + " for fail_matrix, label in zip(fail_sources, [\"AA\", \"AB\", \"BB\", \"BA\"]):\n", + " fail_coords = torch.nonzero(fail_matrix, as_tuple=False).cpu().numpy()\n", + " for i, j in fail_coords:\n", + " failed_non_corr.append({\"type\": label, \"i\": i, \"j\": j, \"pair_i\": mst_pairs[i], \"pair_j\": mst_pairs[j]})\n", + "\n", + " return corr_score, corr_total, int(non_corr_score), non_corr_total, failed_A, failed_B, failed_non_corr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "571cf97f-5342-4005-8374-7b85385cf81e", + "metadata": {}, + "outputs": [], + 
"source": [ + "all_scores = []\n", + "all_failures = []\n", + "\n", + "for i in range(4):\n", + " for j in range(4):\n", + " mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) # shape (31, 2)\n", + " corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs)\n", + "\n", + " # Store scores and failure info together\n", + " all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total))\n", + " all_failures.append({\n", + " \"repeat_A\": i,\n", + " \"repeat_B\": j,\n", + " \"failed_A\": failed_A,\n", + " \"failed_B\": failed_B,\n", + " \"failed_non_corr\": failed_non_corr,\n", + " \"mst_pairs\": mst_pairs,\n", + " })\n", + "\n", + " # Print summary\n", + " print(f\"pairmate A repeat {i} vs pairmate B repeat {j}:\")\n", + " print(f\"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})\")\n", + " print(f\"2-AFC non-corresponding = {non_corr_score}/{non_corr_total} ({non_corr_score/non_corr_total:.2%})\")\n", + " print(\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ec5ea78", + "metadata": {}, + "outputs": [], + "source": [ + "all_scores = np.array(all_scores)\n", + "print(f\"average 2-AFC corresponding: {all_scores[:,0].mean():.2f}/{all_scores[:,1].mean():.2f} ({(all_scores[:,0].sum()/all_scores[:,1].sum()):.2%})\")\n", + "print(f\"average 2-AFC non-corresponding: {all_scores[:,2].mean():.2f}/{all_scores[:,3].mean():.2f} ({(all_scores[:,2].sum()/all_scores[:,3].sum()):.2%})\")\n", + "print(f'chance = 1/{corr_total} ({(1/corr_total):.2%})')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da81a250", + "metadata": {}, + "outputs": [], + "source": [ + "from collections import defaultdict\n", + "\n", + "# Map from image index to failure details\n", + "failed_images = defaultdict(list)\n", + "\n", + "for failure_entry in all_failures:\n", + " mst_pairs = failure_entry[\"mst_pairs\"]\n", + " i, j = failure_entry[\"repeat_A\"], failure_entry[\"repeat_B\"]\n", + "\n", + " # A-side failures\n", + " for fail_idx in failure_entry[\"failed_A\"]:\n", + " image_idx = mst_pairs[fail_idx][0]\n", + " pairmate_idx = mst_pairs[fail_idx][1]\n", + " failed_images[image_idx].append({\n", + " \"repeat_A\": i,\n", + " \"repeat_B\": j,\n", + " \"pairmate\": pairmate_idx,\n", + " \"type\": \"A\",\n", + " })\n", + "\n", + " # B-side failures\n", + " for fail_idx in failure_entry[\"failed_B\"]:\n", + " image_idx = mst_pairs[fail_idx][1]\n", + " pairmate_idx = mst_pairs[fail_idx][0]\n", + " failed_images[image_idx].append({\n", + " \"repeat_A\": i,\n", + " \"repeat_B\": j,\n", + " \"pairmate\": pairmate_idx,\n", + " \"type\": \"B\",\n", + " })\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b55e6b64", + "metadata": {}, + "outputs": [], + "source": [ + "# import matplotlib.pyplot as plt\n", + "\n", + "# for img_idx, failure_list in failed_images.items():\n", + "# print(f\"\\n==== Failed Image {img_idx} ====\")\n", + "\n", + "# # Load and normalize the embeddings\n", + "# image = images[img_idx].unsqueeze(0).to(device).float()\n", + "# image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)\n", + "\n", + "# # Get voxel→CLIP embedding\n", + "# voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)\n", + "# voxel_embed = model.backbone(model.ridge(voxel, 0))[1]\n", + "# voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)\n", + "\n", + "# # Display original image\n", + "# 
print(\"Original image:\")\n", + "# display(utils.torch_to_Image(images[img_idx]))\n", + "\n", + "# # Collect unique pairmates involved in the failure\n", + "# pairmate_indices = list(set(entry[\"pairmate\"] for entry in failure_list))\n", + "\n", + "# # Plot failed pairmates with similarity annotations\n", + "# fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))\n", + "# if len(pairmate_indices) == 1:\n", + "# axs = [axs]\n", + "\n", + "# # Compute \"correct\" similarity — voxel to its own CLIP embedding\n", + "# correct_clip = image_clip.float()\n", + "# correct_voxel_sim = (voxel_embed.float() @ correct_clip.T).item()\n", + "# print(f\"Correct voxel→CLIP similarity = {correct_voxel_sim:.4f}\")\n", + "\n", + "# # Plot failed pairmates with similarity annotations\n", + "# fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))\n", + "# if len(pairmate_indices) == 1:\n", + "# axs = [axs]\n", + "\n", + "# for ax, mate_idx in zip(axs, pairmate_indices):\n", + "# mate_image = images[mate_idx].unsqueeze(0).to(device).float()\n", + "# mate_clip = nn.functional.normalize(clip_img_embedder(mate_image).flatten(1), dim=-1).float()\n", + "\n", + "# # Similarities\n", + "# clip_sim = (correct_clip @ mate_clip.T).item()\n", + "# voxel_sim = (voxel_embed.float() @ mate_clip.T).item()\n", + "\n", + "# # Check if this was the mistaken \"higher\" match\n", + "# wrong_match = voxel_sim > correct_voxel_sim\n", + "\n", + "# # Plot image and annotate\n", + "# ax.imshow(utils.torch_to_Image(images[mate_idx]))\n", + "# ax.axis(\"off\")\n", + "# ax.set_title(f\"Pairmate {mate_idx}\\nCLIP={clip_sim:.3f}\\nVoxel={voxel_sim:.3f}\\n{'← WRONG' if wrong_match else ''}\",\n", + "# color=\"red\" if wrong_match else \"black\")\n", + "\n", + "\n", + "# plt.tight_layout()\n", + "# plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f053eed8", + "metadata": {}, + "outputs": [], + "source": [ + "# comp[20,18] is the only False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "03c0c8d2", + "metadata": {}, + "outputs": [], + "source": [ + "# import matplotlib.pyplot as plt\n", + "\n", + "# for img_idx, failure_list in failed_images.items():\n", + "# print(f\"\\n==== Failed Image {img_idx} ====\")\n", + "\n", + "# # Load and normalize the embeddings\n", + "# image = images[img_idx].unsqueeze(0).to(device).float()\n", + "# image_clip = nn.functional.normalize(clip_img_embedder(image).flatten(1), dim=-1)\n", + "\n", + "# # Get voxel→CLIP embedding\n", + "# voxel = torch.Tensor(vox[img_idx]).unsqueeze(0).unsqueeze(0).to(device)\n", + "# voxel_embed = model.backbone(model.ridge(voxel, 0))[1]\n", + "# voxel_embed = nn.functional.normalize(voxel_embed.flatten(1), dim=-1)\n", + "\n", + "# # Display original image\n", + "# print(\"Original image:\")\n", + "# display(utils.torch_to_Image(images[img_idx]))\n", + "\n", + "# # Collect unique pairmates involved in the failure\n", + "# pairmate_indices = list(set(entry[\"pairmate\"] for entry in failure_list))\n", + "\n", + "# # Plot failed pairmates with similarity annotations\n", + "# fig, axs = plt.subplots(1, len(pairmate_indices), figsize=(4 * len(pairmate_indices), 4))\n", + "# if len(pairmate_indices) == 1:\n", + "# axs = [axs]\n", + "\n", + "# for ax, mate_idx in zip(axs, pairmate_indices):\n", + "# # Get all CLIP embeddings for failed image and pairmates\n", + "# all_indices = [img_idx] + pairmate_indices\n", + "# all_images = 
images[all_indices].to(device).float()\n", + "# all_clip_embeds = clip_img_embedder(all_images)\n", + "# all_clip_embeds = nn.functional.normalize(all_clip_embeds.flatten(1), dim=-1).float()\n", + "\n", + "# # Compare voxel embedding for the failed image to all CLIP embeddings\n", + "# sims = (voxel_embed.float() @ all_clip_embeds.T).squeeze().cpu().detach().numpy() # shape: (1, N) → (N,)\n", + "# image_ids = [\"correct\"] + [f\"pairmate {idx}\" for idx in pairmate_indices]\n", + "\n", + "# # Sort and display\n", + "# sorted_sims = sorted(zip(image_ids, all_indices, sims), key=lambda x: -x[2])\n", + "\n", + "# print(\"\\n🧠 Voxel→CLIP similarity ranking:\")\n", + "# for label, idx, sim in sorted_sims:\n", + "# print(f\"{label:12} (index {idx:3}): similarity = {sim:.4f}\")\n", + "\n", + "# # Optional assertion: did any pairmate score higher than the correct image?\n", + "# correct_sim = sims[0]\n", + "# higher = [(label, sim) for label, _, sim in sorted_sims[1:] if sim > correct_sim]\n", + "# if higher:\n", + "# print(\"\\n❌ Mismatch detected: voxel embedding matched other images more than the correct one!\")\n", + "# else:\n", + "# print(\"\\n✅ Model correctly ranked the correct image highest (despite failure elsewhere)\")\n", + "\n", + "# plt.tight_layout()\n", + "# plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4cf75fc6-126d-4ba7-9034-8d10b4015203", + "metadata": {}, + "outputs": [], + "source": [ + "mst_pairs[:5]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e490d576-26a0-406d-9c90-8bc1d963e02c", + "metadata": {}, + "outputs": [], + "source": [ + "pairs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6320f3c-f034-4414-ac5d-54657b5c80a2", + "metadata": {}, + "outputs": [], + "source": [ + "# images[image_idx[pairs[0][0]]].shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "84d63bf0-8ac4-406e-9562-49d784adf0bf", + "metadata": {}, + "outputs": [], + "source": [ + "ix = 0\n", + "display(utils.torch_to_Image(images[pairs[ix][0]]))\n", + "display(utils.torch_to_Image(images[pairs[ix][1]]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fbc97bcf-5f2c-4666-b5d4-e075704b327a", + "metadata": {}, + "outputs": [], + "source": [ + "# print(np.allclose(embed_A[0], embed_A[1])) # across repeats\n", + "# print(np.allclose(embed_A[0], embed_B[0])) # across pairmates" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "94c5db52-0bcf-4be5-9a35-5710f4738250", + "metadata": {}, + "outputs": [], + "source": [ + "# def generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=2):\n", + "# n_imgs, n_pairmates, n_repeats = pairs.shape\n", + "# nonmatch_pairs = []\n", + "\n", + "# for i in range(n_imgs):\n", + "# other_idxs = [j for j in range(n_imgs) if j != i]\n", + "# sampled_j = np.random.choice(other_idxs, size=num_images_per_source, replace=False)\n", + "\n", + "# for j in sampled_j:\n", + "# for _ in range(num_repeats):\n", + "# a_side = np.random.randint(2)\n", + "# b_side = np.random.randint(2)\n", + "# a_repeat = np.random.randint(n_repeats)\n", + "# b_repeat = np.random.randint(n_repeats)\n", + "\n", + "# pair_a = pairs[i, a_side, a_repeat]\n", + "# pair_b = pairs[j, b_side, b_repeat]\n", + "# nonmatch_pairs.append([pair_a, pair_b])\n", + "\n", + "# return np.array(nonmatch_pairs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "095b1dd8-7463-4095-bf0c-6c6ce702f711", + "metadata": {}, + 
"outputs": [], + "source": [ + "# nonmatch_pairs = generate_random_nonmatching_pairs(pairs, num_images_per_source=5, num_repeats=1)\n", + "# results = evaluate_mst_pairs(nonmatch_pairs)\n", + "# print(results)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35f91159-eab1-4f9d-ba75-a380e3a4111b", + "metadata": {}, + "outputs": [], + "source": [ + "# # Compare first few pairs\n", + "# for pair in pairs: # Checking first 2 pairs\n", + "# print(\"Indices in mst_pairs:\", pair)\n", + "# print(\"Corresponding filenames:\")\n", + "# print(f\"Image 1: {x[pair[0]]}\")\n", + "# print(f\"Image 2: {x[pair[1]]}\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "922145d0-9e71-4c05-a76d-36009f193926", + "metadata": {}, + "outputs": [], + "source": [ + "# for i in range(len(pairs)):\n", + "# fig, ax = plt.subplots(1, 2, figsize=(10,8))\n", + "\n", + "# ax[0].imshow(images[pairs[i][0]].permute(1,2,0).numpy())\n", + "# ax[0].set_title(f\"Repeat 1\")\n", + "\n", + "# ax[1].imshow(images[pairs[i][1]].permute(1,2,0).numpy())\n", + "# ax[1].set_title(f\"Repeat 2\")\n", + "\n", + "# plt.setp(ax, xticks=[], yticks=[])\n", + "# plt.tight_layout()\n", + "# plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e97de7b2-2a32-4ce7-9533-aa8e06946cfe", + "metadata": {}, + "outputs": [], + "source": [ + "# score = 0\n", + "# total = 0\n", + "# with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): \n", + "# for pair in unique_images_pairs:\n", + "# imageA_idx, imageB_idx = pair\n", + "# imageA_idx = np.where(image_idx == imageA_idx)[0].item()\n", + "# imageB_idx = np.where(image_idx == imageB_idx)[0].item()\n", + " \n", + "# voxel = vox[imageA_idx].to(device)[None]\n", + "# voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n", + " \n", + "# imageA = images[imageA_idx].to(device)[None]\n", + "# imageB = images[imageB_idx].to(device)[None]\n", + "\n", + "# clip_targetA = clip_img_embedder(imageA.float())\n", + "# clip_targetB = clip_img_embedder(imageB.float())\n", + " \n", + "# voxel_ridge = model.ridge(voxel,0)\n", + "# backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n", + "\n", + "# clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n", + "# clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)\n", + "# clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)\n", + "\n", + "# cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)\n", + "# cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)\n", + " \n", + "# if cossimA > cossimB:\n", + "# score += 1\n", + "# total += 1\n", + " \n", + "# for pair in unique_images_pairs:\n", + "# imageA_idx, imageB_idx = pair\n", + "# imageA_idx = np.where(image_idx == imageA_idx)[0].item()\n", + "# imageB_idx = np.where(image_idx == imageB_idx)[0].item()\n", + " \n", + "# voxel = vox[imageB_idx].to(device)[None]\n", + "# voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n", + " \n", + "# imageA = images[imageA_idx].to(device)[None]\n", + "# imageB = images[imageB_idx].to(device)[None]\n", + "\n", + "# clip_targetA = clip_img_embedder(imageA.float())\n", + "# clip_targetB = clip_img_embedder(imageB.float())\n", + " \n", + "# voxel_ridge = model.ridge(voxel,0)\n", + "# backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n", + "\n", + "# clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n", + "# 
clip_targetA_norm = nn.functional.normalize(clip_targetA.flatten(1), dim=-1)\n", + "# clip_targetB_norm = nn.functional.normalize(clip_targetB.flatten(1), dim=-1)\n", + "\n", + "# cossimA = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetA_norm)\n", + "# cossimB = utils.batchwise_cosine_similarity(clip_voxels_norm, clip_targetB_norm)\n", + " \n", + "# if cossimB > cossimA:\n", + "# score += 1\n", + "# total += 1\n", + "\n", + "# print(score/total)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee4bd720-864a-480a-818e-e7e61c8ad429", + "metadata": {}, + "outputs": [], + "source": [ + "#display(utils.torch_to_Image(imageA))\n", + "#display(utils.torch_to_Image(imageB))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0b54c3d-7544-4249-a0da-c4ae90b5172a", + "metadata": {}, + "outputs": [], + "source": [ + "# from scipy.stats import binomtest\n", + "\n", + "# total_samples = len(np.array(unique_images_pairs).flatten())\n", + "# assert total_samples == 100\n", + "\n", + "# correct_predictions = int((score/total) * total_samples) # calculate the number of correct predictions\n", + "# expected_accuracy = 0.5 # expected accuracy under the null hypothesis\n", + "\n", + "# # Perform the binomial test\n", + "# binom_stats = binomtest(correct_predictions, total_samples, expected_accuracy, alternative='greater')\n", + "# p_value = binom_stats.pvalue\n", + "\n", + "# # Output the result\n", + "# print(f\"P-value: {p_value}\")\n", + "# if p_value < 0.05:\n", + "# print(\"The decoder's accuracy is significantly better than chance.\")\n", + "# else:\n", + "# print(\"The decoder's accuracy is not significantly better than chance.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68473420-d978-4d4d-ba51-90e02e62741b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]", + "language": "python", + "name": "conda_rt_mindeye2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": { + "height": "calc(100% - 180px)", + "left": "10px", + "top": "150px", + "width": "165px" + }, + "toc_section_display": true, + "toc_window_display": true + }, + "toc-autonumbering": true, + "vscode": { + "interpreter": { + "hash": "62aae01ef0cf7b6af841ab1c8ce59175c4332e693ab3d00bc32ceffb78a35376" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}