{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "f16c9d4c-66cb-4692-a61d-9aa86a8765d0", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "importing modules\n" ] } ], "source": [ "print(\"importing modules\")\n", "import os\n", "import sys\n", "import json\n", "import argparse\n", "import numpy as np\n", "import time\n", "import random\n", "import string\n", "import h5py\n", "from tqdm import tqdm\n", "import webdataset as wds\n", "from PIL import Image\n", "import pandas as pd\n", "import nibabel as nib\n", "import nilearn\n", "\n", "import matplotlib.pyplot as plt\n", "import torch\n", "import torch.nn as nn\n", "from torchvision import transforms\n", "\n", "# tf32 data type is faster than standard float32\n", "torch.backends.cuda.matmul.allow_tf32 = True\n", "\n", "import utils\n", "from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas\n", "\n", "# this block imports utils from mindeye_preproc as \"preproc\"\n", "import importlib.util\n", "parent_utils_path = \"/home/ri4541/mindeye_preproc/analysis/utils.py\"\n", "spec = importlib.util.spec_from_file_location(\"utils\", parent_utils_path)\n", "preproc = importlib.util.module_from_spec(spec)\n", "parent_dir = os.path.dirname(parent_utils_path)\n", "if parent_dir not in sys.path:\n", " sys.path.append(parent_dir)\n", "spec.loader.exec_module(preproc)\n", "\n", "if utils.is_interactive():\n", " from IPython.display import clear_output # function to clear print outputs in cell\n", " %load_ext autoreload \n", " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n", " %autoreload 2 " ] }, { "cell_type": "code", "execution_count": 2, "id": "33a4a539-7c94-4447-b3a4-9208c6af7920", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "LOCAL RANK 0\n", "device: cuda\n" ] } ], "source": [ "from accelerate import Accelerator, DeepSpeedPlugin\n", "from generative_models.sgm.models.diffusion import DiffusionEngine\n", "from omegaconf import OmegaConf\n", "\n", "import os\n", "### Multi-GPU config ###\n", "local_rank = os.getenv('RANK')\n", "if local_rank is None: \n", " local_rank = 0\n", "else:\n", " local_rank = int(local_rank)\n", "print(\"LOCAL RANK \", local_rank) \n", "\n", "accelerator = Accelerator(split_batches=False, mixed_precision=\"fp16\")\n", "device = accelerator.device\n", "print(\"device:\",device)" ] }, { "cell_type": "markdown", "id": "7d2d8de1-d0ca-4b5f-84d8-2560f0399a5a", "metadata": {}, "source": [ "# Data" ] }, { "cell_type": "markdown", "id": "84c47b5b-869f-468c-bb93-43610ee5dbe0", "metadata": {}, "source": [ "## New Design" ] }, { "cell_type": "code", "execution_count": 3, "id": "69037852-cdbd-4eac-a720-3fca5dc48a61", "metadata": {}, "outputs": [], "source": [ "if utils.is_interactive():\n", " sub = \"sub-005\"\n", " session = \"ses-03\"\n", " task = 'C' # 'study' or 'A'; used to search for functional run in bids format\n", "else:\n", " sub = os.environ[\"sub\"]\n", " session = os.environ[\"session\"]\n", " task = os.environ[\"task\"]\n", "\n", "if session == \"all\":\n", " ses_list = [\"ses-01\", \"ses-02\"] # list of actual session IDs\n", " design_ses_list = [\"ses-01\", \"ses-02\"] # list of session IDs to search for design matrix\n", "else:\n", " ses_list = [session]\n", " design_ses_list = [session]\n", " \n", "task_name = f\"_task-{task}\" if task != 'study' else ''\n", "resample_voxel_size = False\n", 
"resample_post_glmsingle = False # do you want to do voxel resampling here? if resample_voxel_size = True and resample_post_glmsingle = False, assume the resampling has been done prior to GLMsingle, so just use resampled directory but otherwise proceed as normal\n", "load_from_resampled_file = False # do you want to load resampled data from file? if True, assume resampling was done in this notebook before, and that we're not using the GLMsingle resampled data\n", " \n", "train_test_split = 'MST' # 'MST', 'orig', 'unique'\n", "remove_close_to_MST = False\n", "remove_random_n = False\n", "\n", "if remove_close_to_MST or remove_random_n:\n", " assert remove_close_to_MST != remove_random_n # don't remove both sets of images\n", "\n", "n_to_remove = 0\n", "if remove_random_n:\n", " assert train_test_split == 'MST' # MST images are excluded from the n images removed, so only makes sense if they're not in the training set\n", " n_to_remove = 150\n", " \n", "if resample_voxel_size:\n", " # voxel size was unchanged in glmsingle, want to perform resampling here\n", " resampled_vox_size = 2.5\n", " resample_method = \"sinc\" # {trilinear,nearestneighbour,sinc,spline}, credit: https://johnmuschelli.com/fslr/reference/flirt.help.html\n", " \n", " # file name helper variables\n", " vox_dim_str = str(resampled_vox_size).replace('.', '_') # in case the voxel size has a decimal, replace with an underscore\n", " resampled_suffix = f\"resampled_{vox_dim_str}mm_{resample_method}\"\n", " mask_resampled_suffix = resampled_suffix\n", " if resample_post_glmsingle:\n", " resampled_suffix += '_postglmsingle'\n", " else:\n", " resampled_suffix += '_preglmsingle'" ] }, { "cell_type": "code", "execution_count": 4, "id": "2ece766e-4272-4ca3-81e9-9ea5dccd2279", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "session label: ses-03\n" ] } ], "source": [ "session_label = preproc.get_session_label(ses_list)\n", "print('session label:', session_label)\n", "n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list)" ] }, { "cell_type": "code", "execution_count": 6, "id": "e52985b1-95ff-487b-8b2d-cc1ad1c190b8", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "model_name: sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0\n", "glmsingle_path: /scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C\n", "glmsingle path exists!\n", "--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --glmsingle_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C --model_name=sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0 --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4\n", "The autoreload extension is already loaded. 
To reload it, use:\n", " %reload_ext autoreload\n" ] } ], "source": [ "# if running this interactively, can specify jupyter_args here for argparser to use\n", "if utils.is_interactive():\n", " # model_name=f\"{sub}_{session}_task-{task}_bs24_MST_rishab_{train_test_split}split\"\n", " model_name = \"sub-005_all_task-C_bs24_MST_rishab_MSTsplit_union_mask_finetune_0\"\n", " print(\"model_name:\", model_name)\n", " glmsingle_path = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}\"\n", " print(\"glmsingle_path:\", glmsingle_path)\n", " assert os.path.exists(glmsingle_path)\n", " print(\"glmsingle path exists!\")\n", " # global_batch_size and batch_size should already be defined in the above cells\n", " # other variables can be specified in the following string:\n", " jupyter_args = f\"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \\\n", " --glmsingle_path={glmsingle_path} \\\n", " --model_name={model_name} --subj=1 \\\n", " --no-blurry_recon --use_prior \\\n", " --hidden_dim=1024 --n_blocks=4\"\n", " \n", " print(jupyter_args)\n", " jupyter_args = jupyter_args.split()\n", " \n", " from IPython.display import clear_output # function to clear print outputs in cell\n", " %load_ext autoreload \n", " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n", " %autoreload 2 " ] }, { "cell_type": "code", "execution_count": 7, "id": "49e5dae4-606d-4dc6-b420-df9e4c14737e", "metadata": { "tags": [] }, "outputs": [], "source": [ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n", "parser.add_argument(\n", " \"--model_name\", type=str, default=\"testing\",\n", " help=\"will load ckpt for model found in ../train_logs/model_name\",\n", ")\n", "parser.add_argument(\n", " \"--data_path\", type=str, default=\"/weka/proj-fmri/shared/mindeyev2_dataset\",\n", " help=\"Path to where NSD data is stored / where to download it to\",\n", ")\n", "parser.add_argument(\n", " \"--subj\",type=int, default=1, choices=[1,2,3,4,5,6,7,8],\n", " help=\"Validate on which subject?\",\n", ")\n", "parser.add_argument(\n", " \"--blurry_recon\",action=argparse.BooleanOptionalAction,default=True,\n", ")\n", "parser.add_argument(\n", " \"--use_prior\",action=argparse.BooleanOptionalAction,default=False,\n", " help=\"whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)\",\n", ")\n", "parser.add_argument(\n", " \"--clip_scale\",type=float,default=1.,\n", ")\n", "parser.add_argument(\n", " \"--n_blocks\",type=int,default=4,\n", ")\n", "parser.add_argument(\n", " \"--hidden_dim\",type=int,default=2048,\n", ")\n", "parser.add_argument(\n", " \"--new_test\",action=argparse.BooleanOptionalAction,default=True,\n", ")\n", "parser.add_argument(\n", " \"--seq_len\",type=int,default=1,\n", ")\n", "parser.add_argument(\n", " \"--seed\",type=int,default=42,\n", ")\n", "parser.add_argument(\n", " \"--glmsingle_path\",type=str,default=\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_ses-01\",\n", ")\n", "if utils.is_interactive():\n", " args = parser.parse_args(jupyter_args)\n", "else:\n", " args = parser.parse_args()\n", "\n", "# create global variables without the args prefix\n", "for attribute_name in vars(args).keys():\n", " globals()[attribute_name] = getattr(args, attribute_name)\n", " \n", "# make output directory\n", "# os.makedirs(\"evals\",exist_ok=True)\n", "# os.makedirs(f\"evals/{model_name}\",exist_ok=True)" ] }, { "cell_type": 
"code", "execution_count": 8, "id": "34c1e0c6-0641-4239-8201-f2c676532302", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "csv/sub-005_ses-03.csv\n", "(785, 126)\n", "len_unique_images 532\n", "n_runs 11\n", "['all_stimuli/unchosen_nsd_1000_images/unchosen_7211_cocoid_59250.png'\n", " 'all_stimuli/special515/special_67295.jpg'\n", " 'all_stimuli/unchosen_nsd_1000_images/unchosen_5729_cocoid_53029.png'\n", " 'all_stimuli/special515/special_70232.jpg']\n", "[174.7109683 178.7049172 182.7072832 186.7297016]\n", "[0. 0. 0. 0.]\n", "(693,)\n" ] } ], "source": [ "if session == \"all\":\n", " filename = f\"csv/{sub}_{ses_list[0]}.csv\"\n", " data = pd.read_csv(filename)[14:]\n", " print(filename)\n", " print(data.shape)\n", " for s in ses_list[1:]:\n", " filename = f\"csv/{sub}_{s}.csv\"\n", " print(filename)\n", " data = pd.concat([data, pd.read_csv(filename)[14:]])\n", " print(data.shape)\n", "else:\n", " filename = f\"csv/{sub}_{session}.csv\"\n", " if sub == 'sub-001' and session == 'ses-01':\n", " data = pd.read_csv(filename)[23:]\n", " else: \n", " data = pd.read_csv(filename)[14:]\n", " print(filename)\n", " print(data.shape)\n", "\n", "image_names = data['current_image'].values\n", "starts = data['trial.started'].values\n", "is_new_run = data['is_new_run'].values\n", "\n", "if sub == 'sub-001':\n", " if session == 'ses-01':\n", " assert image_names[0] == 'images/image_686_seed_1.png'\n", " elif session in ('ses-02', 'all'):\n", " assert image_names[0] == 'all_stimuli/special515/special_40840.jpg'\n", " elif session == 'ses-03':\n", " assert image_names[0] == 'all_stimuli/special515/special_69839.jpg'\n", " elif session == 'ses-04':\n", " assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'\n", "elif sub == 'sub-003':\n", " assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png'\n", "\n", "unique_images = np.unique(image_names.astype(str))\n", "unique_images = unique_images[(unique_images!=\"nan\")]\n", "# unique_images = unique_images[(unique_images!=\"blank.jpg\")]\n", "len_unique_images = len(unique_images)\n", "print(\"len_unique_images\",len_unique_images)\n", "print(\"n_runs\",n_runs)\n", "\n", "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n", " assert len(unique_images) == 851\n", "\n", "print(image_names[:4])\n", "print(starts[:4])\n", "print(is_new_run[:4])\n", "\n", "if remove_random_n:\n", " # want to remove 150 imgs\n", " # 100 special515 imgs are repeated 3x (300 total)\n", " # all other train imgs are only shown once (558 total)\n", " # of the 150, want to sample proportionally since we're cutting all repeats for special515\n", " # so take out 51 (17 unique) from special515 and 99 from rest = removing 150 total\n", " np.random.seed(seed)\n", " options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] # all the imgs that only appear once (this is O(N^2) b/c of count() within list comprehension but image_names is a relatively small list)\n", " options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] # all the special515 images that are repeated (count()>1 necessary because there are special515 that are not repeated)\n", " imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False)\n", " 
imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False))\n", "\n", "image_idx = np.array([]) # contains the unique index of each presented image\n", "vox_image_names = np.array([]) # contains the names of the images corresponding to image_idx\n", "all_MST_images = dict()\n", "for i, im in enumerate(image_names):\n", " # skip if blank, nan\n", " if im == \"blank.jpg\":\n", " i+=1\n", " continue\n", " if str(im) == \"nan\":\n", " i+=1\n", " continue\n", " vox_image_names = np.append(vox_image_names, im)\n", " if remove_close_to_MST: # optionally skip close_to_MST images \n", " if \"closest_pairs\" in im:\n", " i+=1\n", " continue\n", " elif remove_random_n:\n", " if im in imgs_to_remove:\n", " i+=1\n", " continue\n", " \n", " image_idx_ = np.where(im==unique_images)[0].item()\n", " image_idx = np.append(image_idx, image_idx_)\n", " \n", " if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): # MST images are ones that matched these image titles\n", " import re\n", " if ('w_' in im or 'paired_image_' in im or re.match(r'all_stimuli/rtmindeye_stimuli/\\d{1,2}_\\d{1,3}\\.png$', im) or re.match(r'images/\\d{1,2}_\\d{1,3}\\.png$', im)): \n", " # the regexp here looks for **_***.png, allows 1-2 chars before underscore and 1-3 chars after it\n", " # print(im)\n", " all_MST_images[i] = im\n", " i+=1 \n", " elif 'MST' in im:\n", " all_MST_images[i] = im\n", " i+=1\n", " \n", "image_idx = torch.Tensor(image_idx).long()\n", "# for im in new_image_names[MST_images]:\n", "# assert 'MST_pairs' in im\n", "# assert len(all_MST_images) == 300\n", "\n", "unique_MST_images = np.unique(list(all_MST_images.values())) \n", "\n", "MST_ID = np.array([], dtype=int)\n", "if remove_close_to_MST:\n", " close_to_MST_idx = np.array([], dtype=int)\n", "if remove_random_n:\n", " random_n_idx = np.array([], dtype=int)\n", "\n", "vox_idx = np.array([], dtype=int)\n", "j=0 # this is a counter keeping track of the remove_random_n used later to index vox based on the removed images; unused otherwise\n", "for i, im in enumerate(image_names): # need unique_MST_images to be defined, so repeating the same loop structure\n", " # skip if blank, nan\n", " if im == \"blank.jpg\":\n", " i+=1\n", " continue\n", " if str(im) == \"nan\":\n", " i+=1\n", " continue\n", " if remove_close_to_MST: # optionally skip close_to_MST images \n", " if \"closest_pairs\" in im:\n", " close_to_MST_idx = np.append(close_to_MST_idx, i)\n", " i+=1\n", " continue\n", " if remove_random_n:\n", " if im in imgs_to_remove:\n", " vox_idx = np.append(vox_idx, j)\n", " i+=1\n", " j+=1\n", " continue\n", " j+=1\n", " curr = np.where(im == unique_MST_images)\n", " # print(curr)\n", " if curr[0].size == 0:\n", " MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later\n", " else:\n", " MST_ID = np.append(MST_ID, curr)\n", " \n", "assert len(MST_ID) == len(image_idx)\n", "# assert len(np.argwhere(pd.isna(data['current_image']))) + len(np.argwhere(data['current_image'] == 'blank.jpg')) + len(image_idx) == len(data)\n", "# MST_ID = torch.tensor(MST_ID[MST_ID != len(unique_MST_images)], dtype=torch.uint8) # torch.tensor (lowercase) allows dtype kwarg, Tensor (uppercase) is an alias for torch.FloatTensor\n", "print(MST_ID.shape)\n", "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n", " assert len(all_MST_images) == 100" ] }, { "cell_type": 
"code", "execution_count": 9, "id": "dd08fa34-ebd0-482a-bc29-8fb32c8b888b", "metadata": {}, "outputs": [], "source": [ "# unique_images_pairs = [\n", "# (1,2),(3,4),(5,6),(7,8),(9,10),(11,12),(13,14),(15,16),\n", "# (17,18),(19,20),(21,22),(23,24),(25,26),(27,28),(29,30),\n", "# (31,32),(33,34),(35,36),\n", "# (787, 788), (789, 790), (791, 792), (793, 794), (795, 796),\n", "# (797, 798), (799, 800), (801, 802), (803, 804), (805, 806),\n", "# (807, 808), (809, 810), (811, 812), (813, 814), (815, 816),\n", "# (817, 818), (819, 820), (821, 822), (823, 824), (825, 826),\n", "# (827, 828), (829, 830), (831, 832), (833, 834), (835, 836),\n", "# (837, 838), (839, 840), (841, 842), (843, 844), (845, 846),\n", "# (847, 848), (849, 850)\n", "# ]\n", "# unique_images[unique_images_pairs]" ] }, { "cell_type": "code", "execution_count": 10, "id": "59bc3b21-e29d-4d2b-8223-cd704e3f058a", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 1/693 [00:00<03:38, 3.16it/s]/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n", " warnings.warn(\n", "100%|██████████| 693/693 [00:33<00:00, 20.78it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "images torch.Size([693, 3, 224, 224])\n", "MST_images 693\n", "MST_images==True 124\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "import imageio.v2 as imageio\n", "resize_transform = transforms.Resize((224, 224))\n", "MST_images = []\n", "images = None\n", "for im_name in tqdm(image_idx):\n", " if sub == 'sub-001' and session == 'ses-01':\n", " image_file = f\"all_stimuli/rtmindeye_stimuli/{unique_images[im_name]}\"\n", " else:\n", " image_file = f\"{unique_images[im_name]}\"\n", " im = imageio.imread(image_file)\n", " im = torch.Tensor(im / 255).permute(2,0,1)\n", " im = resize_transform(im.unsqueeze(0))\n", " if images is None:\n", " images = im\n", " else:\n", " images = torch.vstack((images, im))\n", " if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n", " if ('w_' in image_file or 'paired_image_' in image_file or re.match(r'all_stimuli/rtmindeye_stimuli/\\d{1,2}_\\d{1,3}\\.png$', image_file) or re.match(r'all_stimuli/rtmindeye_stimuli/images/\\d{1,2}_\\d{1,3}\\.png$', image_file)): \n", " MST_images.append(True)\n", " else:\n", " MST_images.append(False)\n", " else: \n", " if (\"MST_pairs\" in image_file): # (\"_seed_\" not in unique_images[im_name]) and (unique_images[im_name] != \"blank.jpg\") \n", " MST_images.append(True)\n", " else:\n", " MST_images.append(False)\n", "\n", "print(\"images\", images.shape)\n", "MST_images = np.array(MST_images)\n", "print(\"MST_images\", len(MST_images))\n", "if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):\n", " assert len(MST_images[MST_images==True]) == 100\n", 
"print(\"MST_images==True\", len(MST_images[MST_images==True]))" ] }, { "cell_type": "code", "execution_count": 11, "id": "6f440a02-dd8a-4a13-9c90-bd07253f6910", "metadata": {}, "outputs": [], "source": [ "pairs = utils.find_paired_indices(image_idx)\n", "pairs = sorted(pairs, key=lambda x: x[0])" ] }, { "cell_type": "code", "execution_count": 12, "id": "c5f61515-d4fa-419b-b945-cdedc8f24669", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "vox (693, 1, 1, 183408)\n", "vox (693, 183408)\n" ] } ], "source": [ "vox = None\n", "needs_postprocessing = False\n", "params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx)\n", "\n", "if resample_post_glmsingle == True:\n", " glm_save_path_resampled = f\"{glmsingle_path}/vox_resampled.nii.gz\"\n", " if load_from_resampled_file == True:\n", " # resampling was done in this notebook so we can load from file\n", " vox = nib.load(glm_save_path_resampled)\n", " else:\n", " # do resampling here\n", " assert os.path.exists(ref_name) and os.path.exists(omat_name), \"need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs\"\n", " vox = load_preprocess_betas(orig_glmsingle_path, *params)\n", " vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name)\n", " needs_postprocessing = True\n", "\n", "if vox is None:\n", " # either resampling was done in glmsingle or we aren't resampling \n", " vox = load_preprocess_betas(glmsingle_path, *params)\n", "\n", "if needs_postprocessing == True:\n", " vox = apply_mask(vox, avg_mask)\n", " vox = vox.reshape(-1, vox.shape[-1]) # flatten the 3D image into np array with shape (voxels, images)\n", " print(vox.shape)\n", "\n", "assert len(vox) == len(image_idx)" ] }, { "cell_type": "code", "execution_count": 13, "id": "a4675ba2-b27c-48db-893c-d81f978ba93b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C/sub-005_ses-03_task-C_brain.nii.gz\n", "Mask dimensions: (2.0, 2.0, 2.0)\n", "\n", "Affine:\n", "[[ 2. 0. 0. -76.29234314]\n", " [ 0. 2. 0. -84.79180908]\n", " [ 0. 0. 2. -62.80359268]\n", " [ 0. 0. 0. 1. 
]]\n", "\n", "There are 183408 voxels in the included brain mask\n", "\n" ] } ], "source": [ "from nilearn.plotting import plot_roi, plot_anat, plot_epi\n", "\n", "mask_name = f'{glmsingle_path}/{sub}_{session_label}{task_name}_brain'\n", "if resample_voxel_size:\n", " if resample_post_glmsingle is True:\n", " # use original mask directory\n", " mask_in_name = f'{orig_glmsingle_path}/{sub}_{session}{task_name}_brain.nii.gz'\n", " mask_out_name = mask_name + f\"_{mask_resampled_suffix}.nii.gz\"\n", " assert os.path.exists(mask_in_name)\n", " applyxfm(mask_in_name, ref_name, omat_name, resample_method, output=mask_out_name)\n", " apply_thresh(mask_out_name, 0.5, output=mask_out_name) # binarize the mask since resampling can result in non- 0 or 1 values\n", " mask_name += f\"_{mask_resampled_suffix}\"\n", "\n", "mask_name += \".nii.gz\"\n", "print(mask_name)\n", "avg_mask = nib.load(mask_name)\n", "# mask info\n", "dimsize=avg_mask.header.get_zooms()\n", "affine_mat = avg_mask.affine\n", "brain=avg_mask.get_fdata()\n", "xyz=brain.shape #xyz dimensionality of brain mask and epi data\n", "\n", "print('Mask dimensions:', dimsize)\n", "print('')\n", "print('Affine:')\n", "print(affine_mat)\n", "print('')\n", "print(f'There are {int(np.sum(brain))} voxels in the included brain mask\\n')" ] }, { "cell_type": "code", "execution_count": 14, "id": "8a5573cf-19b5-40e6-b21c-883e762f5f35", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_ses-03_task-C/sub-005_ses-03_task-C_nsdgeneral.nii.gz\n", "nsdgeneral path exists!\n" ] } ], "source": [ "nsdgeneral_path = f'{glmsingle_path}/{sub}_{session_label}{task_name}_nsdgeneral.nii.gz' \n", "print(nsdgeneral_path)\n", "assert os.path.exists(nsdgeneral_path)\n", "print(f\"nsdgeneral path exists!\")" ] }, { "cell_type": "code", "execution_count": 15, "id": "b940e5dc-ac25-4f48-9764-6030cf18ff1e", "metadata": {}, "outputs": [], "source": [ "if resample_voxel_size:\n", " nsdgeneral_path = f'{glmsingle_path}/{sub}_task-{task}_nsdgeneral_resampled.nii.gz' \n", " if resample_post_glmsingle:\n", " assert os.path.exists(orig_glmsingle_path)\n", " roi_in_path = f\"{orig_glmsingle_path}/{sub}_task-{task}_nsdgeneral.nii.gz\" # the input file is the original nsdgeneral mask (without resampling), from the original glmsingle directory\n", " applyxfm(roi_in_path, ref_name, omat_name, resample_method, output=nsdgeneral_path)" ] }, { "cell_type": "code", "execution_count": 16, "id": "a3187c14-13df-4e51-915c-bb866eec413f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(76, 90, 74)\n" ] }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqgAAAFyCAYAAAA59SiIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAek0lEQVR4nO3dfYxU9bkH8AcUHQqKBlxXoAU0Xo2CYn3DdqNNb1utbSyxNl6LLYgmjWijsdj6RjE12lrTGPpmrVotVVODUkwt2FRaX9aKSCuK1pfaCxFEXLFoRV2L7t4/vLsOZdedGWbm/M7M55NMMswMZ35z9pzZ7z7P+Z0zKCK6AwAAEjE46wEAAEAxARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAKApFAqFKBQKWQ+DEgioAEDDKxQK0d7eHu3t7UJqDgioAAAkRUAFgERMnz49uru749BDD816KDSwnu2s57Zly5ZYt25d3HjjjTF69OishxcRETtmPQAAAOpvzpw5sXr16igUCjFlypSYMWNGtLW1xcSJE+Ptt9/OdGwCKgBAE1qyZEn85S9/iYiIG264ITZu3BgXXHBBnHDCCbFgwYJMx6bFDwBAPPDAAxERsc8++2Q8EgEVAICIGD9+fEREbNq0KduBhBY/AEBTGjFiRIwcOTIKhUIceeSRMXfu3Ojs7Iy77ror66EJqAAAzWjp0qVb/Xv16tVx6qmnxgsvvJDRiN4noAIANKFZs2bFs88+GyNGjIiZM2fG0Ucfnfns/R4CKgBAE1q+fHnvLP5FixZFe3t73HrrrbHffvvFG2+8kenYTJICAGhyXV1dceGFF8aYMWPi7LPPzno4AioAABH33XdfPPzww3HuuefGzjvvnOlYtPgBIDEzZ86M4447bpvH582bF5s3b85gRDSLq666Km6//faYMWNGXHvttZmNQ0AFgMTMmjWrz8dvuukmAZWaWrhwYTz33HMxe/bsuO6666KrqyuTcQyKiO5M3hkAoE4KhUK0t7dHRERbW1t0dnZmPCI+iAoqAJCkxYsXV21ZJ554YtWWRe2ZJAUAQFIEVAAAkuIY1AZXKBQiIhxrw3azLQGlqGZbvtpaWloiIqKjo6Nqyzz++OOrtizep4LawHoOCG9vb+8NF1AJ2xIA9SSgAgCQlMxn8U+fPj1uuummOOyww3qvBwvV1rOd9XjnnXfipZdeij/84Q9x8cUXx/r167MbXBNLtRWoZQelSXUfrqdS1oHvlPJlHlChnubMmROrV6+OQqEQU6ZMiRkzZkRbW1tMnDgx3n777ayHBwCEgEqTWbJkSW+l/oYbboiNGzfGBRdcECeccEIsWLAg49EBABECKk3ugQceiAsuuCD22WefrIfS8PLUCszTWKtB+5FyNNv+UQ0DrTP74LZMkqKpjR8/PiIiNm3alO1AAIBeKqg0lREjRsTIkSOjUCjEkUceGXPnzo3Ozs646667sh5arqmo5JtJHvTFfl0/fa3rZt/nBFSaytKlS7f69+rVq+PUU0+NF154IaMRAQD/SUClqcyaNSueffbZGDFiRMycOTOOPvpos/cBIDECKk1l+fLlvbP4Fy1aFO3t7XHrrbfGfvvtF2+88UbGo8sfLcDm0fOzbva2Y57YP/Ot+OfXjPudSVI0ra6urrjwwgtjzJgxcfbZZ2c9HADg/wmoNLX77rsvHn744Tj33HNj5513zno4AEAk1OKfOXNmHHfccds8Pm/evNi8eXMGI6JZXHXVVXH77bfHjBkz4tprr816OMlbuHBh1kMgQ/21jZuxBZkibf3G1Iyz/JMJqLNmzerz8ZtuuklApaYWLlwYzz33XMyePTuuu+666OrqynpIANDUBkVEd9aDoDYKhUK0t7dHRERbW1t0dnZmPCLyqnhb6ujoyHg0pKjRqzl5oYL6wVpaWiKi8b7HGnH/S6aCyntqdcLsStuyjbjRA9XX7DOOgeoySQoAgKRo8WeoHq2YWrQzVEeaz+LFixu2NUZ9+N6oLa390jTD91ij7GsqqAAAJEVABQAgKVr8dZBl66XW7YxGaSXwvv6212ZojZEG3yul0dYvX7N9j+V5X1JBBQAgKQIqAABJcR7UKmu2lstAnzfP7QUgG86pCtXRsy/lcT9SQQUAICkqqBVqtkpppVRC8sH2TKryXAECKqeCCgBAUgRUAACS4jyoJchz+zMP53zTustGudt1HrYlmk+zfX/k+fdRCnyP5WefUUEFACApAioAAEnR4i/SiK2TPLUz8tJ2yLtKt/M8bUs0p2b4DmnE31P15HtsaynvMyqoAAAkRUAFACApTtQfWiapcFJ/AKiflH/vqqACAJCUpq2gqpqmLeW/6vLI9k4zcFlUaBwqqAAAJEVABQAgKU3V4tfmrL6PTvpon4//ddVfq/Ye2naVsb1DY7AvUw+p/a5VQQUAICkCKgAASWmKFr/2SHX119Yv5zWVHAJgZv/AbOvQ/37gewPyQwUVAICkCKgAACSlYVv8Wp2VK6WFX8/36OtwAO3+rdneofHYr8lCKr9fVVABAEhKQ1VQU/1rs7haWM3zg1ZTPaqmtZLKX3tA2nxXQH6ooAIAkBQBFQCApOS+xZ9CW7+cy32m2u4vHkut2v31eA+AUqR2WcceKfxOgx5ZHhajggoAQFIEVAAAkpLbFn9KbZD+WtcDtbGbud1fzvv23C9lHaXatgMASqeCCgBAUgRUAACSktsWP/VRzXZ/OYc8bK9mOCF3Soe5AEA1qaACAJCUXFVQU6oYNeN5PHuqqfX47ClNGgOopZR+t0F/6t2ZVEEFACApAioAAEnJVYu/nrSxAQCyoYIKAEBSBFQAAJKSfIu/kWc3pnqp04H0N9ZmPLNBvfW3P5Sz7vO0rUGjauTfbTS+elxWXAUVAICkJF9BbWSNVsmq5lWnGJh1DECjUkEFACApAioAAElJssWf1cHjtWqZNlorvx76mkBWys+nr3Vd78uzVVvx+LX1AWgGKqgAACRFQAUAIClJtvizYhZ6Oqr5s8hjW7+W+lqfDkOB+nD+UyiNCioAAEkRUAEASIoWPzVRzsz7vtT7/6WsET8TAPlXy7
PkqKACAJAUFdQaytPEk77OO9ooNqzd0Hu/9cOtGY4EACiFCioAAEkRUAEASErTtvhr1dLOW3u8VhNwUp3Ys2jBot77U780NbNxAAD9U0EFACApAioAAElJvsVfjVZ8qe3mStvSeWvrN7M8tfVdEhFqxyWQIW0qqAAAJEVABQAgKcm3+IsN1O4vp0W/vW35PLf1U51hXyvLli3Legi50tf2keftHYD8UUEFACApSVZQiw9eL75MZTU1w4SoyRMnx/qO9b33Bw/y90ie1LPSPdB7lbMt5WkfAaA6iif2VmMSosQCAEBSBFQAAJKSZIu/FM020accPeumq7ur97GVT6ys6xi2dxIbQLU59ynkhwoqAABJEVABAEhKblv81dQIs45LmYVt5nX6Fi1YlPUQoKFo60M+qaACAJAUFVQiYuCrdDWK4grl1C9NzWwcjaZ4mxmomt/I2xdpUDWF/FNBBQAgKQIqAABJ0eKP91uSeWk95vV8ouW0gWtFW782SjlEJK/bLfmhtQ+NQwUVAICkCKgAACSl5BZ/oVCo5Tj6VXy5zlqp1WVAJ0+cXPLyy3ltzzopfm3P/+/rdf95vy8pXAq1r89QDf/zlf/pvZ/VdlyqQYMG9d6vx7ZfqnK2pf5+jnk5hIZ8OfHEE3vvb+/+3dnZub3D6dPixYtrslxoZIMioruUF65YsaLGQwGA7LS1tdUkpAqo6WhpaYmIiI6OjoxH0tiqcTy4Fj8AAEkpucXf1tZWy3H0a+HChb33a9UCrmZ7u1ZjrERXd1dseHlDRES07tH6gZc67W8dbO/nKefwhlrZ+7/2runyq+m2m2/b5rHDjzg8g5FsrZxtqVi9Dx2hORS39aupVi1+oHwlB9QUdtxSfylmKdUxDh40uKKx1ePz1Po9Uth2S9Xdve0RN6ltU5VuS1Atedqngcokfx7U4uMYNqzdkOFIttZo53Ss1efpb7kmzADlcI5TaC7KIAAAJEVABQAgKcm3+OuhrzZ0CpflbGS1XqcOIciOdU81ae1Dc1JBBQAgKQIqAABJSb7Fv2jBokzet7gFrd1PLWW1jVeTtj7bSysfKKaCCgBAUgRUAACSknyLH4DGpbUPjaHa+7IKKgAASUm+gjr1S1N77y9evLj3fj0nK+VhYpSJXFvLU1WmeBvva8JU64dbe++ndLnfCJOjqEye9k8gGyqoAAAkRUAFACApybf4ixW3hVJrdaakp+06eeLkbAdCVaR2ntSVT6zMegjkhFY+zaAah9Xl9XCpWu7jKqgAACRFQAUAICm5avEX66sc3myz1wdqCax8YmW0tLTUaTRpWLZsWdZD2C4DzejPSjNuS1ROa39rxeuj+Gw0edLf5b+pDut3WyqoAAAkJbcV1J6/SPP61+j2qOSvq+KJLX1Vmuv9F1uzVbsr0VNNLa6kFleIp0yZUtP391c8A1EpBWoltwEVAKgPf7Dmx0AFoLz8LLX4AQBIyqCI6M56ENWSt3Oj1uOvmJ6JLR0dHTV/r6w0U5uxv4lTtWj3/+f22QzbEv1rpv2sHprx8LQU1OJ7LKvzoFb7ULlKxuA8qAAANA0BFQCApOR+klR/M5yL1Xq2c3/yciAy+VHN86TaPumLVj6Upp5no0ntvVo/3FrzcaigAgCQFAEVAICk5L7FX9zyHEjxrMlqlsu1SutPG7Lvdv9Ow3bqfay/bdz2Sg/7EbXmEp6Va/YL2qigAgCQlNxXUCvlLzkaSV+dBOdZpIdKaXqKfyaNvK/6Xdu/1NbNQBXbekyMKqaCCgBAUgRUAACS0rQtfvJHm7I8zdJCxL4BNB4VVAAAkiKgAgCQFC1+aALa/Y1DOx/S0DMLv5zzlaY2c38g9Z65X0wFFQCApAioAAAkRYuf5GlpVlfP+tTqzw/7AAMZqM2ct9YyqKACAJCUpqqgmigC77M/pEellHKUMzmn+LXNVk3t+exd3V2xvmN9JmPI0zrPcmJUMRVUAACSIqACAJCUQRHRnfUgstbI7c2WlpaIiOjo6Mh4JOXR6sxOf/tDXrelPLC906Ov/a+cVn6l8tSCLkVf66y4xT+6ZXQMHvReja6vz17tdV7J+q3Hz71YzxhT+T5SQQUAICkCKgAASWmqWfzAwJwntT5SaaORluLtYsPaDXV7377ayY3W9u9PvVvpqUrtO0kFFQCApAioAAAkxSz+Io3Y0uyZed3W1hadnZ0Rke7nTK29wPsWL15sFn8V2MapRD1b/RH5be0P1KrvbxZ/NfW37nrGVsq6rechB6mclL8vKqgAACTFJKkGduKJJ0Z7e/s2j7vEJeXqb1tiYKqm5EFeq6YR2U1y6llnpbx/ntdvVlRQAQBIigoqAJSpUCjU7b26urvq9l55VOr6KX5dNddptX8+9fx513M77tEzH2YgJkn1oxFa38Vt2eJJUn0p5/NqWTafQqHQ57bUCPtJrdhPGtuKFSuyHgLk0mGHHVbS67T4AQBIihY/AJSpra0tk/f932f/t+bvsfKJlTVZ7uSJk0te/va+tuexYl3dXbHh5fdO2dW6R+sHnmaqVuugHH19hmrY+7/2rslyq01A7UcjtOfKObakET4v9dcI243DFKhEqcfRVdvoj4zuvV+r86PW6rKnPYGw2rPuK1nu4EGDa3Ie1IFkdcaBlM932h8tfgAAkqKCCjS1RqgCAzQaARUAcmjZsmW996dMmVLT9ypuTdfjpPPlnAS/Fqr9ebP6HMXbSN5o8QMAkBQVVACgZJVOoiqnKppVxbFHni9NWjz2qV+amt1AtpMKKgAASRFQAQBIihY/AORQX+3bWp0bdSDlTCrq7/ms2/rFKp0klcJnaJQzk6igAgCQlNwF1B133DGefPLJ6O7ujm984xtbPTd37tzo7u7u9/axj30so1FDbU2dOjXuvvvueOGFF6KzszPWrl0bCxYsiAMPPDDroQFA2XLX4v/6178eH/nIR/p8buHChfHcc89t8/gVV1wRw4cPj0ceeaTWw4NMTJo0KTZt2hTz5s2LjRs3Rmtra8ycOTOWL18eRx11VDz++ONZDxGog+JLWuah3Z8HA32eFNr6ebyU6UByFVD32GOP+Pa3vx1XXnllXHbZZds8v2rVqli1atVWj40dOzbGjh0b119/fWzZsqVeQ4W66mt/uP7662PdunVx5plnxplnnpnBqACgMmW1+D/xiU9Ed3d3TJ06dZvnTjnllOju7q7p1Sy+973vxTPPPBM333xzyf/nlFNOicGDB8ctt9xSs3HBQAqFQjz11FPx1FNPRaFQ6H189913j/Xr18eDDz4YgwdX94ibjo6OePPNN2O33Xar6nKBfGj9cGvvbdmyZb23evropI/23ip5PjXF48163MU/30ZUVgX13nvvjeeffz6mTZsWixYt2uq5adOmxXPPPRfLli2LnXbaKXbZZZeSlvnKK6+U9LrDDz88pk+fHm1tbdHd3V3ymKdNmxbPP/983H///SX/H6i2zs7OmD59ejz44INx+eWX9x4//ZOf/CRGjBgRM2bMiK6uru3ed0aMGBFDhgyJ1tbWOPfcc2PEiBGxdOnSq
n4WAKi1slv8N998c5x33nmx6667xr/+9a+IiBg1alR85jOficsvvzwi3qta3nTTTSUtb9CgQSW97kc/+lHcdtttsWzZshg3blxJ/+eAAw6Igw8+OK688sqSXg+1tHz58vj+978f3/rWt+I3v/lN7LnnnnHKKafEOeecE3//+98jYvv3nWXLlsX+++8fERGvv/56XHbZZXHDDTdU7TMAQD2UHVDnz58fF110UZx00knxi1/8IiIiTj755BgyZEhv6/33v/99fOpTn6raIGfMmBGTJk2Kk046qaz/N23atIgI7X2Scemll8bnP//5+OUvfxnDhw+Pe++9N374wx/2Pr+9+85pp50Wu+66a+y9995x2mmnxdChQ2OHHXaId955pxrDB3Kqr3OmLlqwqPd+LQ/P+yDFk47y0ubPQvGhGXm+fGk5yg6ozzzzTCxfvjymTZvWG1CnTZsWDz30UPzjH/+IiIgNGzbEhg3lzR4cNmxYDB8+vPff7777bmzcuDF22WWX+O53vxtXXXVVrFu3rqxlfvnLX+5z4hRkZcuWLTFz5sxYsWJFvPXWW3Haaadt9Xwl+06x4i+xX//61/HUU09FRMT5559f8TIBoN4qmsU/f/78mDdvXowZMyZ23nnnOOqoo+Kss87qfb5QKMSIESNKWtZLL70UERGzZ8+OSy+9tPfxNWvWxIQJE2L27Nmx0047xW233dbb2h87dmxEvDfBZNy4cbF+/fptZuh//OMfj/Hjx8cFF1xQyUdsCJ2dndHW1tZ7nzQce+yxERExdOjQ2HfffWPNmjW9z1Wy7/Tn1VdfjT/+8Y8xbdq07Q6otiUA6mlQRJQ+4+j/jRw5MtavXx8XX3xxDB06NC655JIYPXp076SN6dOnl30c3YQJE2Lvvffuffytt96KP//5z3HjjTfGjBkzPnAZkydPjscee2yrx37605/G1772tRg/fnysXbu29A8HNTRp0qR45JFH4pZbbonJkyfHqFGjYtKkSb3Hc1ey73yQhQsXxrHHHhvDhg3bnmEDDS6Fdn+tdXV3xfqO9RERMbpldAwelPa1ihp1dn6pKgqoERGLFi2K8ePHR6FQiGeeeSa+8IUv9D7X2tpa8hVsBpphfMghh2xzYv6Wlpb4+c9/HjfeeGPceeed8ac//an3F3zEe1ebevHFF+Nvf/tbHHPMMWV8KqidHXfcMR5++OHYfffd46CDDooJEyb0htXTTz89Iirfd/bYY494+eWXt3p+3Lhx8fjjj8fKlSvtB8AHElDT0+wBteIT9c+fPz/uuOOOiIiYM2fOVs9t73F0xR599NF49NFHt3qsp9X/5JNPxp133rnN/zn22GNj1KhRJkeRlEsuuSQmT54c//3f/x2bN2+OVatWxXe+8524/PLL4/bbb48lS5ZUvO+sWrUqli5dGitXroxNmzbFvvvuG6effnoMGTKkqQ9zASCfKq6gDhkyJDZs2BCDBw+O1tbWePvtt6s8tP6NGzcu1qxZE7Nnz44f/OAH2zx/6623xhe/+MVobW2NTZs21W1c0J9DDjkkHn744bjmmmvinHPO6X188ODB8dBDD8WYMWPiwAMPjNdee62i5c+dOzc+97nPxT777BO77LJLdHR0xP333x9XXHFFPPHEE9X6GEATyepSqdXWc6aAyRMn56KC2uyV0x4VB9Qddtgh1q9fH7/97W/jjDPOqPKwAIAsCajZEFDfU3GLf+rUqdHS0hLz58+v5ngAgAQUB6U8h9We86t2dXdlPJKt1fuys3lTdkA94ogj4qCDDoo5c+bEX//6V5cQBQCgqsqub5955plxzTXXREdHR3z1q1+txZgAAGhiFR+DCgA0n7ydkiqFY1B72vnNcpnSakjzCGEAAJqWgAoAQFIqnsUPADSfvtrUeWv7V1PxbHwt/OpRQQUAICkCKgAkaP/9948lS5bE66+/Hq+88krMnz8/Ro0alfWwoC60+AEgMWPGjIn7778/Xnvttbjoooti+PDhMXv27Jg0aVIcccQRsWXLlqyHuJVSWtt5Ptl/j75Orq+tXxsCKgAk5qKLLophw4bFoYceGmvXro2IiOXLl8c999wTM2bMiOuuuy7jEUJtOQ8qAFRg3LhxsWbNmn6fHzRoUMXL3rBhQ9x3331x8sknb/X4008/HWvXro1Pf/rTFS87a8UTqio10ESsviYu3Xn7nTFm/JiIiHhhzQvR3V1+/FEtrR8VVACowMsvvxynnnrqVo8NGTIkrr766vj3v/8dERFDhw6ND33oQwMu6913341XX301IiJGjx4de+65Z6xYsWKb1y1fvjyOP/747R88JE5ABYAKvPnmm3HLLbds9diPf/zjGD58eG+F85vf/GZceumlAy5rzZo1MWHChIiI2GuvvSIi4sUXX9zmdS+++GKMHDkydtppp94QDI1IQAWAKvjKV74SZ511Vpx33nlx7733RkTE/Pnzo729fcD/+9Zbb/XeHzp0aEREvP3229u8rrOzs/c1eQ2o1WiTV3KYwBdO+kIUCoWIeH89ki4BFQC208EHHxw/+9nP4tZbb42rr7669/HVq1fH6tWry1pWT1jdeeedt3muJ2AVB1pKJ5jmh4AKANtht912izvuuCOeffbZOOOMM7Z6btiwYTF8+PABl/Huu+/Gxo0bI+L91n5Pq7/YXnvtFa+88kpuq6dQjm43Nzc3Nze38m+DBg3q/t3vfte9cePG7vHjx2/z/Ny5c7tLsXr16q3+30svvdR92223bbO8p59+uvuee+7J/HO7udX6poIKABWaO3duHHvssfHZz362z1NOVXIMakTEHXfcEdOnT4+xY8fGunXrIiLik5/8ZOy3335bHUIAjcp5UAGgAhMnTozHHnss7r///rj++uu3ef4/Z/iXY+zYsfHoo4/Gq6++GvPmzYvhw4fH+eefH+vWrYvDDz9ci5+mkHkZ183Nzc3NLW+3Y4455gPb9tu7/AMOOKD77rvv7t68eXP3P//5z+5f/epX3S0tLZl/bje3etxUUAEASMrgrAcAAADFBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFL+D9p3kCKPFOwwAAAAAElFTkSuQmCC", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "roi = nib.load(nsdgeneral_path)\n", "print(roi.shape)\n", "plot_roi(roi, bg_img=avg_mask)\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": 17, "id": "d906312b-ea5d-418d-8326-e8b395c9a9c2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "total voxels (whole brain) = 183408\n", "nsdgeneral voxels = 19577\n" ] } ], "source": [ "avg_mask = avg_mask.get_fdata().flatten()\n", "print(f\"total voxels (whole brain) = {int(avg_mask.sum())}\")\n", "\n", "roi = roi.get_fdata()\n", "roi = roi.flatten()\n", "roi = roi[avg_mask.astype(bool)]\n", "roi[np.isnan(roi)] = 0\n", "roi = roi.astype(bool)\n", "print(f\"nsdgeneral voxels = {roi.sum()}\")" ] }, { "cell_type": "code", "execution_count": 18, "id": "ce12274a-3b35-444d-92b0-7cfd0949badc", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "vox before ROI exclusion: (693, 183408)\n", "vox after ROI exclusion: (693, 19577)\n" ] } ], "source": [ "# ROI masking?\n", "print(f\"vox before ROI exclusion: {vox.shape}\")\n", "vox = vox[:,roi]\n", "print(f\"vox after ROI exclusion: {vox.shape}\")\n", "\n", "if np.any(np.isnan(vox)):\n", " print(\"NaNs found! Removing voxels...\")\n", " x,y = np.where(np.isnan(vox))\n", " vox = vox[:,np.setdiff1d(np.arange(vox.shape[-1]), y)]" ] }, { "cell_type": "code", "execution_count": 18, "id": "26802a5b-7bc8-4d47-b8e0-1dfa557fc6ad", "metadata": {}, "outputs": [], "source": [ "pairs_homog = np.array([[p[0], p[1]] for p in pairs])" ] }, { "cell_type": "code", "execution_count": 19, "id": "50d52f93-af1d-448d-92e4-5af8096aaaf2", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 19302/19302 [00:01<00:00, 17349.27it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "rels (19302,)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "vox_pairs = utils.zscore(vox[pairs_homog])\n", "rels = np.full(vox.shape[-1],np.nan)\n", "for v in tqdm(range(vox.shape[-1])):\n", " rels[v] = np.corrcoef(vox_pairs[:,0,v], vox_pairs[:,1,v])[1,0]\n", "print(\"rels\", rels.shape)\n", "assert np.sum(np.all(np.isnan(rels))) == 0" ] }, { "cell_type": "code", "execution_count": 20, "id": "84be077b-fbef-4b23-895c-4928228229d2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(162, 19302, 2)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 162/162 [00:00<00:00, 3290.51it/s]\n" ] } ], "source": [ "# creating img x vox x repetitions matrix | shape=(150, 18419, 2)\n", "vox0 = np.zeros((len(pairs_homog), vox.shape[-1], 2))\n", "print(vox0.shape)\n", "for ipair, pair in enumerate(tqdm(pairs_homog)):\n", " pair = pair[:2] # to keep things consistent, just using the first two repeats\n", " i,j = pair\n", " vox0[ipair, :, :] = vox[pair].T\n", "vox_avg = vox0.mean(-1) # average across the repetitions" ] }, { "cell_type": "code", "execution_count": 21, "id": "6206a31e-3d0a-4a30-ada2-4cffa1009856", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "vox before reliability thresholding: (1386, 19302)\n", "\n", "vox after reliability thresholding: (1386, 1053)\n" ] } ], "source": [ "# Reliability thresholding?\n", "print(f\"\\nvox before reliability thresholding: {vox.shape}\")\n", "vox = vox[:,rels>.2]\n", "print(f\"\\nvox after reliability thresholding: {vox.shape}\")" ] }, { "cell_type": "code", 
"execution_count": 22, "id": "e6f632cc-2b26-4dc8-a4d5-12b641765601", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "torch.Size([1386, 3, 224, 224])\n", "(1386, 1053)\n" ] } ], "source": [ "print(images.shape)\n", "print(vox.shape)" ] }, { "cell_type": "code", "execution_count": 23, "id": "735dfc27-a9bd-4a22-ac3f-a1f44515293e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1138 248\n" ] } ], "source": [ "utils.seed_everything(seed)\n", "\n", "# add_repeats = 48\n", "# imageTrain = np.arange(len(images))\n", "# train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])\n", "# train_image_indices = np.sort(np.append(train_image_indices, np.array(pairs[:add_repeats].flatten())))\n", "\n", "# # check that there's no repeat indices in training data\n", "# assert len(sorted(np.append(np.array([item for item in imageTrain if item not in pairs.flatten()]), np.array(pairs[:add_repeats].flatten())))) == len(set(sorted(np.append(np.array([item for item in imageTrain if item not in pairs.flatten()]), np.array(pairs[:add_repeats].flatten())))))\n", "\n", "# test_image_indices = pairs[add_repeats:]\n", "# print(len(train_image_indices), len(test_image_indices))\n", "\n", "if train_test_split == 'orig':\n", " # train = all images except images that were repeated\n", " # test = average of the same-image presentations\n", " imageTrain = np.arange(len(images))\n", " train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])\n", " test_image_indices = pairs\n", " print(len(train_image_indices), len(test_image_indices))\n", "elif train_test_split == 'MST':\n", " # non-MST images are the train split\n", " # MST images are the test split\n", " train_image_indices = np.where(MST_images==False)[0]\n", " test_image_indices = np.where(MST_images==True)[0]\n", " print(len(train_image_indices), len(test_image_indices))\n", " # for i in test_image_indices:\n", " # assert i in pairs # all MST images have pairs" ] }, { "cell_type": "code", "execution_count": 24, "id": "a292cfad-83f4-4bf8-994e-da2c871c0a6c", "metadata": {}, "outputs": [], "source": [ "# test_image_indices" ] }, { "cell_type": "code", "execution_count": 25, "id": "b81220cd-c11d-4a2a-8755-53b70d90cfe7", "metadata": {}, "outputs": [], "source": [ "# repeats_in_test = []\n", "# for p in pairs:\n", "# group = []\n", "# for item in p:\n", "# curr = np.where(test_image_indices == item)\n", "# if curr[0].size > 0:\n", "# group.append(curr[0][0])\n", "# # print(np.array(group))\n", "# if len(group) > 0:\n", "# repeats_in_test.append(np.array(group))\n", "# # if p[0] in test_image_indices:\n", "# # repeats_in_test.append(p)\n", " \n", "# repeats_in_test = np.array(repeats_in_test)\n" ] }, { "cell_type": "code", "execution_count": 26, "id": "5528d877-b662-41f7-8982-3f31051871f6", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "voxels have been zscored\n", "-0.0318167 1.0120775\n", "vox (1386, 1053)\n" ] } ], "source": [ "train_mean = np.mean(vox[train_image_indices],axis=0)\n", "train_std = np.std(vox[train_image_indices],axis=0)\n", "\n", "vox = utils.zscore(vox,train_mean=train_mean,train_std=train_std)\n", "print(\"voxels have been zscored\")\n", "print(vox[:,0].mean(), vox[:,0].std())\n", "print(\"vox\", vox.shape)\n", "\n", "images = torch.Tensor(images)\n", "vox = torch.Tensor(vox)" ] }, { "cell_type": "code", "execution_count": 27, "id": "1eb5d464-7ffa-419a-a6b4-d0108f8e196a", 
"metadata": {}, "outputs": [], "source": [ "test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices))" ] }, { "cell_type": "markdown", "id": "d8a3901c-60dd-4ae2-b0f5-8a55aa231908", "metadata": {}, "source": [ "# Model" ] }, { "cell_type": "code", "execution_count": 28, "id": "64672583-9f00-46f5-8d4e-00e4c7068a1d", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Loaded test dl for subj1!\n", "\n" ] } ], "source": [ "subj_list = [subj]\n", "subj = subj_list[0]\n", "test_dl = torch.utils.data.DataLoader(test_data, batch_size=len(test_data), shuffle=False, drop_last=True, pin_memory=True)\n", "print(f\"Loaded test dl for subj{subj}!\\n\")" ] }, { "cell_type": "code", "execution_count": 29, "id": "a3cbeea8-e95b-48d9-9bc2-91af260c93d1", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0 248 248\n" ] } ], "source": [ "test_voxels, test_images = None, None\n", "for test_i, behav in enumerate(test_dl):\n", " behav = behav[0]\n", "\n", " if behav.ndim>1:\n", " test_image = images[behav[:,0].long().cpu()].to(device)\n", " test_vox = vox[behav.long().cpu()].mean(1)\n", " else:\n", " test_image = images[behav.long().cpu()].to(device)\n", " test_vox = vox[behav.long().cpu()]\n", " \n", " if test_voxels is None:\n", " test_voxels = test_vox\n", " test_images = test_image\n", " else:\n", " test_voxels = torch.vstack((test_voxels, test_vox))\n", " test_images = torch.vstack((test_images, test_image))\n", "\n", "print(test_i, len(test_voxels), len(test_images))" ] }, { "cell_type": "code", "execution_count": 30, "id": "a3ae7a06-7135-4073-b315-59579e35e2a1", "metadata": {}, "outputs": [], "source": [ "num_voxels_list = []\n", "num_voxels_list.append(test_voxels.shape[-1])" ] }, { "cell_type": "code", "execution_count": 31, "id": "de0400d4-cbd6-4941-a0b2-1a4bc2ae97da", "metadata": { "tags": [] }, "outputs": [], "source": [ "## USING OpenCLIP ViT-bigG ###\n", "sys.path.append('generative_models/')\n", "import sgm\n", "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder\n", "\n", "try:\n", " print(clip_img_embedder)\n", "except:\n", " clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n", " arch=\"ViT-bigG-14\",\n", " version=\"laion2b_s39b_b160k\",\n", " output_tokens=True,\n", " only_tokens=True,\n", " )\n", " clip_img_embedder.to(device)\n", "clip_seq_dim = 256\n", "clip_emb_dim = 1664" ] }, { "cell_type": "code", "execution_count": 32, "id": "56b606a4-7302-4ac5-b89d-bbe4fcb00d11", "metadata": {}, "outputs": [], "source": [ "import utils" ] }, { "cell_type": "code", "execution_count": 33, "id": "e452b5b2-47d9-4271-b9fc-ea331fbac1bc", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "MindEyeModule()\n", "param counts:\n", "1,079,296 total\n", "1,079,296 trainable\n", "param counts:\n", "1,079,296 total\n", "1,079,296 trainable\n", "param counts:\n", "453,360,280 total\n", "453,360,280 trainable\n", "param counts:\n", "454,439,576 total\n", "454,439,576 trainable\n", "param counts:\n", "259,865,216 total\n", "259,865,200 trainable\n", "param counts:\n", "714,304,792 total\n", "714,304,776 trainable\n" ] } ], "source": [ "model = utils.prepare_model_and_training(\n", " num_voxels_list=num_voxels_list,\n", " n_blocks=n_blocks,\n", " hidden_dim=hidden_dim,\n", " clip_emb_dim=clip_emb_dim,\n", " clip_seq_dim=clip_seq_dim,\n", " use_prior=use_prior,\n", " clip_scale=clip_scale\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": 
"f726f617-39f5-49e2-8d0c-d11d27d01c30", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [1664, 1664] now.\n", "WARNING:sgm.modules.attention:SpatialTransformer: Found context dims [1664] of depth 1, which does not match the specified 'depth' of 2. 
Setting context_dim to [1664, 1664] now.\n" ] } ], "source": [ "# prep unCLIP\n", "config = OmegaConf.load(\"/scratch/gpfs/ri4541/MindEyeV2/src/generative_models/configs/unclip6.yaml\")\n", "config = OmegaConf.to_container(config, resolve=True)\n", "unclip_params = config[\"model\"][\"params\"]\n", "network_config = unclip_params[\"network_config\"]\n", "denoiser_config = unclip_params[\"denoiser_config\"]\n", "first_stage_config = unclip_params[\"first_stage_config\"]\n", "conditioner_config = unclip_params[\"conditioner_config\"]\n", "sampler_config = unclip_params[\"sampler_config\"]\n", "scale_factor = unclip_params[\"scale_factor\"]\n", "disable_first_stage_autocast = unclip_params[\"disable_first_stage_autocast\"]\n", "offset_noise_level = unclip_params[\"loss_fn_config\"][\"params\"][\"offset_noise_level\"]\n", "\n", "first_stage_config['target'] = 'sgm.models.autoencoder.AutoencoderKL'\n", "sampler_config['params']['num_steps'] = 38\n", "\n", "diffusion_engine = DiffusionEngine(network_config=network_config,\n", " denoiser_config=denoiser_config,\n", " first_stage_config=first_stage_config,\n", " conditioner_config=conditioner_config,\n", " sampler_config=sampler_config,\n", " scale_factor=scale_factor,\n", " disable_first_stage_autocast=disable_first_stage_autocast)\n", "# set to inference\n", "diffusion_engine.eval().requires_grad_(False)\n", "diffusion_engine.to(device)\n", "\n", "ckpt_path = '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/unclip6_epoch0_step110000.ckpt' \n", "ckpt = torch.load(ckpt_path, map_location='cpu')\n", "diffusion_engine.load_state_dict(ckpt['state_dict'])\n", "\n", "batch={\"jpg\": torch.randn(1,3,1,1).to(device), # jpg doesnt get used, it's just a placeholder\n", " \"original_size_as_tuple\": torch.ones(1, 2).to(device) * 768,\n", " \"crop_coords_top_left\": torch.zeros(1, 2).to(device)}\n", "out = diffusion_engine.conditioner(batch)\n", "vector_suffix = out[\"vector\"].to(device)\n", "print(\"vector_suffix\", vector_suffix.shape)" ] }, { "cell_type": "code", "execution_count": null, "id": "68abd440-7e6b-4023-9dc8-05b1b5c0baa9", "metadata": {}, "outputs": [], "source": [ "# setup text caption networks\n", "from transformers import AutoProcessor, AutoModelForCausalLM\n", "from modeling_git import GitForCausalLMClipEmb\n", "# processor = AutoProcessor.from_pretrained(\"microsoft/git-large-coco\")\n", "# clip_text_model = GitForCausalLMClipEmb.from_pretrained(\"microsoft/git-large-coco\")\n", "processor = AutoProcessor.from_pretrained(\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2\")\n", "clip_text_model = GitForCausalLMClipEmb.from_pretrained(\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2\")\n", "\n", "clip_text_model.to(device) # if you get OOM running this script, you can switch this to cpu and lower minibatch_size to 4\n", "clip_text_model.eval().requires_grad_(False)\n", "clip_text_seq_dim = 257\n", "clip_text_emb_dim = 1024\n", "\n", "class CLIPConverter(torch.nn.Module):\n", " def __init__(self):\n", " super(CLIPConverter, self).__init__()\n", " self.linear1 = nn.Linear(clip_seq_dim, clip_text_seq_dim)\n", " self.linear2 = nn.Linear(clip_emb_dim, clip_text_emb_dim)\n", " def forward(self, x):\n", " x = x.permute(0,2,1)\n", " x = self.linear1(x)\n", " x = self.linear2(x.permute(0,2,1))\n", " return x\n", " \n", "clip_convert = CLIPConverter()\n", "state_dict = torch.load(\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/bigG_to_L_epoch8.pth\", map_location='cpu')['model_state_dict']\n", "clip_convert.load_state_dict(state_dict, strict=True)\n", 
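"# illustrative sanity check (optional): CLIPConverter maps OpenCLIP-bigG tokens of shape\n",
"# (batch, clip_seq_dim=256, clip_emb_dim=1664) to CLIP-L-sized tokens (batch, 257, 1024) expected by the GIT captioner\n",
"assert clip_convert(torch.randn(1, clip_seq_dim, clip_emb_dim)).shape == (1, clip_text_seq_dim, clip_text_emb_dim)\n",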
"clip_convert.to(device) # if you get OOM running this script, you can switch this to cpu and lower minibatch_size to 4\n", "del state_dict" ] }, { "cell_type": "code", "execution_count": null, "id": "41b4a640", "metadata": {}, "outputs": [], "source": [ "# Load pretrained model ckpt\n", "tag='last'\n", "outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')\n", "print(f\"\\n---loading {outdir}/{tag}.pth ckpt---\\n\")\n", "checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')\n", "state_dict = checkpoint['model_state_dict']\n", "model.load_state_dict(state_dict, strict=True)\n", "del checkpoint\n", "print(\"ckpt loaded!\")" ] }, { "cell_type": "code", "execution_count": 31, "id": "c6a706a3-d151-4643-bb34-7d08aa7361c8", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/4 [00:000:\n", " if all_clipvoxels is None:\n", " all_clipvoxels = clip_voxels.cpu()\n", " else:\n", " all_clipvoxels = torch.vstack((all_clipvoxels, clip_voxels.cpu()))\n", " \n", " # Feed voxels through OpenCLIP-bigG diffusion prior\n", " prior_out = model.diffusion_prior.p_sample_loop(backbone.shape, \n", " text_cond = dict(text_embed = backbone), \n", " cond_scale = 1., timesteps = 20).cpu()\n", " \n", " if all_prior_out is None:\n", " all_prior_out = prior_out\n", " else:\n", " all_prior_out = torch.vstack((all_prior_out, prior_out))\n", "\n", " pred_caption_emb = clip_convert(prior_out.to(device).float())\n", " generated_ids = clip_text_model.generate(pixel_values=pred_caption_emb, max_length=20)\n", " generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)\n", " all_predcaptions = np.hstack((all_predcaptions, generated_caption))\n", " print(generated_caption)\n", " \n", " # Feed diffusion prior outputs through unCLIP\n", " if plotting:\n", " jj=-1\n", " fig, axes = plt.subplots(1, 12, figsize=(10, 4))\n", "\n", " for i in range(len(voxel)):\n", " samples = utils.unclip_recon(prior_out[[i]],\n", " diffusion_engine,\n", " vector_suffix,\n", " num_samples=num_samples_per_image)\n", " if all_recons is None:\n", " all_recons = samples.cpu()\n", " else:\n", " all_recons = torch.vstack((all_recons, samples.cpu()))\n", " \n", " if plotting: \n", " jj+=1\n", " axes[jj].imshow(utils.torch_to_Image(image[i]))\n", " axes[jj].axis('off')\n", " jj+=1\n", " axes[jj].imshow(utils.torch_to_Image(samples.cpu()[0]))\n", " axes[jj].axis('off')\n", " \n", " plt.show()\n", "\n", " print(model_name)\n", " # err # dont actually want to run the whole thing with plotting=True\n", "\n", "# resize outputs before saving\n", "imsize = 256\n", "all_images = transforms.Resize((imsize,imsize))(all_images).float()\n", "all_recons = transforms.Resize((imsize,imsize))(all_recons).float()\n", "if blurry_recon: \n", " all_blurryrecons = transforms.Resize((imsize,imsize))(all_blurryrecons).float()\n", " \n", "## Saving ##\n", "if not os.path.exists(eval_dir):\n", " os.mkdir(eval_dir)\n", "\n", "if \"MST\" in model_name:\n", " np.save(f\"{eval_dir}/{model_name}_MST_ID.npy\", MST_ID)\n", "torch.save(all_images.cpu(),f\"{eval_dir}/{model_name}_all_images.pt\")\n", "\n", "# repeats_in_test = []\n", "# for p in pairs:\n", "# if p[0] in test_image_indices:\n", "# repeats_in_test.append(p)\n", " \n", "# repeats_in_test = np.array(repeats_in_test)\n", "\n", "# torch.save(test_image_indices, f\"{eval_dir}/{model_name}_test_image_indices.pt\")\n", "# torch.save(repeats_in_test, f\"{eval_dir}/{model_name}_repeats_in_test.pt\")\n", 
"torch.save(all_recons,f\"{eval_dir}/{model_name}_all_recons.pt\")\n", "if clip_scale>0:\n", " torch.save(all_clipvoxels,f\"{eval_dir}/{model_name}_all_clipvoxels.pt\")\n", "torch.save(all_prior_out,f\"{eval_dir}/{model_name}_all_prior_out.pt\")\n", "torch.save(all_predcaptions,f\"{eval_dir}/{model_name}_all_predcaptions.pt\")\n", "print(f\"saved {model_name} outputs!\")" ] }, { "cell_type": "code", "execution_count": null, "id": "73b243d7-6552-4fc8-bef7-d5ad03b17cb2", "metadata": {}, "outputs": [], "source": [ "if \"MST\" in model_name:\n", " np.save(f\"{eval_dir}/{model_name}_MST_ID.npy\", MST_ID)" ] }, { "cell_type": "code", "execution_count": null, "id": "6c6856c3-9205-48f5-bfb2-7e0099f429a4", "metadata": {}, "outputs": [], "source": [ "all_images.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "f9a7162f-ca3b-4b14-9676-3037094994c8", "metadata": {}, "outputs": [], "source": [ "x = torch.permute(all_images, (0,2,3,1))\n", "y = torch.permute(all_recons, (0,2,3,1))" ] }, { "cell_type": "code", "execution_count": null, "id": "7fa41429-ab6a-4aa6-96b9-5c963016b33a", "metadata": {}, "outputs": [], "source": [ "fig, ax = plt.subplots(5, 2, figsize=(8, 8))\n", "for row, _ in enumerate(ax):\n", " ax[row][0].imshow(x.cpu()[row])\n", " ax[row][1].imshow(y.cpu()[row])\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "d553a7b3-9bdf-44b3-a0bf-398cf5cf402b", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]", "language": "python", "name": "conda_rt_mindeye2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.7" } }, "nbformat": 4, "nbformat_minor": 5 }