{ "cells": [ { "cell_type": "markdown", "id": "b0f0f4f3", "metadata": {}, "source": [ "# Import packages & functions" ] }, { "cell_type": "code", "execution_count": 2, "id": "5bad764b-45c1-45ce-a716-8d055e09821a", "metadata": { "tags": [] }, "outputs": [], "source": [ "import os\n", "import sys\n", "import json\n", "import argparse\n", "import numpy as np\n", "import time\n", "import random\n", "import string\n", "import h5py\n", "from tqdm import tqdm\n", "import webdataset as wds\n", "from PIL import Image\n", "import pandas as pd\n", "import nibabel as nib\n", "\n", "import matplotlib.pyplot as plt\n", "import torch\n", "import torch.nn as nn\n", "from torchvision import transforms\n", "\n", "# tf32 data type is faster than standard float32\n", "torch.backends.cuda.matmul.allow_tf32 = True\n", "\n", "# custom functions #\n", "seed = 0\n", "import utils\n", "\n", "if utils.is_interactive():\n", " from IPython.display import clear_output # function to clear print outputs in cell\n", " %load_ext autoreload \n", " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n", " %autoreload 2 " ] }, { "cell_type": "markdown", "id": "bae2b2ad-e1ef-4262-8263-6ae9a0766caa", "metadata": {}, "source": [ "# Princeton data prep" ] }, { "cell_type": "markdown", "id": "c6dbeabe-9e9c-4d8d-a8c3-414d79d14e63", "metadata": {}, "source": [ "## Load Data & Design" ] }, { "cell_type": "code", "execution_count": 3, "id": "0f2d14fc-bfe3-40dc-b14e-070812c43406", "metadata": {}, "outputs": [], "source": [ "sub = \"sub-001\"\n", "session = \"ses-02\"\n", "n_runs = 16\n", "train_test_split = 'MST'\n", "remove_close_to_MST = True # optionally skip close_to_MST images " ] }, { "cell_type": "code", "execution_count": 4, "id": "34c1e0c6-0641-4239-8201-f2c676532302", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "csv/sub-001_ses-02.csv\n", "len_unique_images 708\n", "n_runs 16\n", "['all_stimuli/special515/special_40840.jpg'\n", " 'all_stimuli/unchosen_nsd_1000_images/unchosen_5137_cocoid_57944.png'\n", " 'all_stimuli/shared1000_notspecial/notspecial_38278.png'\n", " 'all_stimuli/special515/special_30632.jpg']\n", "[658.05201488 662.06546921 666.06833092 670.06900812]\n", "[0. 0. 0. 
0.]\n", "torch.Size([933])\n" ] } ], "source": [ "filename = f\"csv/{sub}_{session}.csv\"\n", "print(filename)\n", "data = pd.read_csv(filename)\n", "image_names = data['current_image'].values[14:]\n", "starts = data['trial.started'].values[14:]\n", "is_new_run = data['is_new_run'].values[14:]\n", "\n", "unique_images = np.unique(image_names.astype(str))\n", "unique_images = unique_images[(unique_images!=\"nan\")]\n", "unique_images = unique_images[(unique_images!=\"blank.jpg\")]\n", "len_unique_images = len(unique_images)\n", "print(\"len_unique_images\",len_unique_images)\n", "print(\"n_runs\",n_runs)\n", "\n", "print(image_names[:4])\n", "print(starts[:4])\n", "print(is_new_run[:4])\n", "\n", "image_idx = np.array([])\n", "for i in range(len(image_names)):\n", " if image_names[i] == \"blank.jpg\":\n", " continue\n", " if str(image_names[i]) == \"nan\":\n", " continue\n", " if remove_close_to_MST: # optionally skip close_to_MST images \n", " if \"closest_pairs\" in image_names[i]:\n", " continue\n", "\n", "\n", " image_idx_ = np.where(image_names[i]==unique_images)[0].item()\n", " image_idx = np.append(image_idx, image_idx_)\n", "image_idx = torch.Tensor(image_idx).long()\n", "\n", "all_MST_images = []\n", "for im in image_names:\n", " if im == \"blank.jpg\":\n", " continue\n", " if str(im) == \"nan\":\n", " continue\n", " if remove_close_to_MST: # optionally skip close_to_MST images \n", " if \"closest_pairs\" in im:\n", " continue\n", "\n", " # print(im)\n", " if 'MST' in im:\n", " all_MST_images.append(im)\n", "assert len(all_MST_images) == 150\n", "\n", "unique_MST_images = np.unique(all_MST_images) \n", "\n", "MST_ID = np.array([], dtype=int)\n", "\n", "for i in range(len(image_names)):\n", " if image_names[i] == \"blank.jpg\":\n", " continue\n", " if str(image_names[i]) == \"nan\":\n", " continue\n", " if remove_close_to_MST: # optionally skip close_to_MST images \n", " if \"closest_pairs\" in image_names[i]:\n", " continue\n", " # print(image_names[i])\n", " curr = np.where(image_names[i] == unique_MST_images)\n", " if curr[0].size == 0:\n", " MST_ID = np.append(MST_ID, np.array(len(unique_MST_images))) # add a value that should be out of range based on the for loop, will index it out later\n", " else:\n", " MST_ID = np.append(MST_ID, curr)\n", "MST_ID = torch.Tensor(MST_ID)\n", "# assert len(MST_ID) == len(images)\n", "print(MST_ID.shape)" ] }, { "cell_type": "markdown", "id": "e48ffe08-71ec-4a3f-9371-66fed2c21de4", "metadata": {}, "source": [ "## Load images" ] }, { "cell_type": "code", "execution_count": null, "id": "2ceb404f-b04f-42b6-afc4-283bb2b40c08", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/933 [00:00" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqgAAAFyCAYAAAA59SiIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAeuUlEQVR4nO3de4xU5f0/8M8SkEVRvK4ULWoMxbRo8YpWojVqtdQY0trytWBBNE1FFKO0VSnVtLG1pdaoJd4V8FItSLG1YCsmqBgRsV7aaqS2C3yRwIpKDdZVZPf7B7/Z3yCz7OzsXJ6Zeb0SknVm9pxnzpzBN5/PeZ7TEBHtAQAAiehV6QEAAEA2ARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSeld6AAAAXVm4cGFerxs1alS3Xt8dmW1TeiqoAAAkRUAFAOpKU1NTNDU1VXoY7ERDRLRXehAAAJ9WijZ9RHSE05aWlh5vS9u/NFRQAaAKjB8/Ptrb2+Poo4+u9FCoAZnzKfNny5YtsXbt2rj33ntj0KBBlR6eSVIAQOWVqlpaatnjrsZq6vTp06O5uTkaGxvj+OOPjwkTJsTIkSNj2LBh8dFHH1VsXAIqAECdWrRoUbz44osREXH33XfHxo0b48orr4yzzz475s6dW7FxafEDABAREc8880xERBx66KEVHYcKKgCwg5213LvTyq7W1n0hiv1eK3HJwMEHHxwREe+9917Z951NQAUAqFMDBgyIffbZJxobG2PEiBFxzTXXRGtrazz22GMVHZeACgBQp5588snt/ru5uTnGjRsXb731VoVGtI2ACgB1rrut6Xpq21dSruNc7Lb/pEmTYuXKlTFgwICYOHFinHTSSRWdvZ8hoAIA1Knly5d3zOJfsGBBLF26NB588MEYOnRofPDBBxUbl4AKAHVIFbQ6dfW59aTC2tbWFldddVUsWbIkJk+eHL/4xS8K3lZPWWYKAICIiHjqqafi+eefj8suuyz69u1bsXGooAJAFZk4cWKceeaZOzx+0003xebNmyswImrNjBkzYt68eTFhwoS4/fbbKzIGARUAqsikSZNyPj5r1qwuA6q2fu0rxq1X58+fH2+++WZMnTo17rzzzmhrayvW8PLWEBHtZd8rAFB2Auo2TU1NERHR0tJS4ZGUViUW+i8WARUAapxgur16CagZ1RhUTZICACApAioAAEnR4geAGqSt37l6a/FntLS0xIQJEyo9jLyooAIA1IFMMK8GKqgAUENUTrtWrxXUbKlPnFJBBQAgKQIqAABJcScpAKhy2vrUGhVUAACSIqACAJAULf4a0djYGBERra2tFR4J1cj5A1Bfsi8LSXFGvwpqDWhsbIylS5fG0qVLO4IG5Mv5A0BqVFABoEqZHEWtqngFdfz48dHe3h5HH310pYdClcucS5k/W7ZsibVr18a9994bgwYNqvTwAIA8qaBSc6ZPnx7Nzc3R2NgYxx9/fEyYMCFGjhwZw4YNi48++qjSwwMAuiCgUnMWLVoUL774YkRE3H333bFx48a48sor4+yzz465c+dWeHQAkJbMpSIpTZaqeIsfSu2ZZ56JiIhDDz20wiMBAPIhoFLzDj744IiIeO+99yo7EAAgL1r81JwBAwbEPvvsE42NjTFixIi45pprorW1NR577LFKDw0AyIOASs158sknt/vv5ubmGDduXLz11lsVGhEA0B0CKjVn0qRJsXLlyhgwYEBMnDgxTjrpJLP3AaCKCKjUnOXLl3fM4l+wYEEsXbo0HnzwwRg6dGh88MEHFR4dANAVk6SoaW1tbXHVVVfFAQccEJMnT670cAB6bOHChR1/oFYJqNS8p556Kp5//vm47LLLom/fvpUeDgDQhWRa/BMnTowzzzxzh8dvuumm2Lx5cwVGRC2ZMWNGzJs3LyZMmBC33357pYcDAOxEMgF10qRJOR+fNWuWgEqPzZ8/P958882YOnVq3HnnndHW1lbpIQEAnah4i3/27NnR0NDQ6R9LA5GvzLmUmSCVrb29PYYMGRJDhgwRTgEgcRUPqAAAkE1ABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASeld6QFQWgsXLqz0ELYzatSoSg+h5qX2mQNAd6mgAgCQFBXUHkixUjV//vxKD2Gnso+ZamrxFOtcLNb547MFoCdUUAEASIqACgBAUuqyxZ9iax4Kkeq5XIpxuWwAoLRSugxPBRUAgKQIqAAAbGfhwoUV7dLVVYs/1XZovcr1eVS6pUC6iv39da4BpEsFFQCApNRsBVW1tDiOOvyoom7vr3/7606fz3xuqludc24Xh4lcVBvffeqJCioAAEkRUAEASEpNtfi1P3qm2O38nkhpLbZUOL/TV6zPyDkPpKJS/z9WQQUAIClVX0FNraqUXYXsakJQCnpSNc31/kpRhVVNpd6YwAXUOxVUAACSIqACAJCUqm3xp9Daz9XOzm57Z55PrdVfrDZ8IZczVNslEJWWwnlObXAnLqCaqKACAJCUqq2gpipXdbIeqoaFVGVTrTADXTORC+pPOSctq6ACAJAUARUAgKRUVYs/tQkj+a4DmloLu7PxlPtOUoUcl8w5oBUItcdELiBDBRUAgKRUVQUVAOpJap1DKBcBtQfybYl39rpUW//lbvWzI/9Tgp7r6ffIJQJQOVr8AAAkpSoqqNVeTUqtUtqVXHfDAqg3JmVC50r9/VBBBQAgKQIqAABJqYoWfyX1ZF3Tamvt51LsiVPFOiblvN1auVT7pSwAUCwqqAAAJEUFtchqoWqaS7knTmXvo1aPaT6q4c5kUOvK3bHRTaGalOr7oYIKAEBSBFQAAJKixZ+DtT93ricTp3K17uv5eGvlAcCOVFABAEhKshXUSlaWupqEUs8Vv2LK9zjW44Qp5xikqZR3z9FRodoV8/uhggoAQFIEVAAAkpJsi7+UStEyrpfWc7aero1ayCSpWml9a+UBQOdUUAEASErVV1B7WlHL/H49VkCLqZLV0FJOWignk8YAYBsVVAAAkiKgAgCQlKpv8RdLZ+3VXC1UbVVS4RIVAGqRCioAAEkRUAEASErVtvhrZT1M6kcp1z41sx/Ko9pXC4FyyP7/XaHfGRVUAA
CSUhUV1HJXS1WgKBedAADYkQoqAABJEVABAEhKVbT4C7mNJvXDeQFUq1JOnoRqpoIKAEBSqqKCqkJWXN25a1Z3ZW/D59a5ro5NT7oGlpwCoNqpoAIAkBQBFQCApCTb4tcern7a/dvkmgSRq/WefYyKdbwy29HqB6CaqKACAJCUZCuoFEeuSpxqWppKWXEuZOKUyVZQOpaXgp1TQQUAICkCKgAASUmuxa/t0X3Dhw2PdS3rOn7u1ZD73x1dra1ZyjZuOe4GtmDugo6fR39zdMn2Uyrlmkj26f20tbd1ef6YbAVAOamgAgCQFAEVAICkJNfir5R6aF0Wa0Z/sdu91kvdphqOQ763aAWAnlBBBQAgKXlXUBsbG0s5jh1kV2KGDxte1n1Xg+xj0tbelvPn7m7n5b+/vNPnP72Prj6XXNvLNb7O3kt3/c95/9Pxc7nP153JdZxyHZtsmfO/HOd+T84fACiFhohoz+eFK1asKPFQACBNLS0tERExatSoomzPijWV1dTUFBH//3OldAr9zmjxAwCQlLxb/CNHjizlODrMnz8/Isrf1u+q5dqVQsab7z672nZbe1usf3t9REQM3G9gp+ug5juGUhz7zH5Kse0Xlr8QERFjxo0p+raLId9zulyfxaf15PyJ6Pl3BwA+Le+A2traWspx7KC7/5OstFKOtzvb7tXQq8djSeW95Ku9fdtVKuU+R7urkPde7u9BMc4fAOgpy0z9P9nL5+S7VE5PlwLa2bJPlbqrULXvJ0WZ62/W/+/6HZ6rhqWlsllGCoCuFONabaUSAACSIqACAJAULf4ccrX7tcLTNfqboys9hIL5vKF+WFoK8qeCCgBAUpKroGYurM3+l2Ylq0wqXGlatmxZpYdQFNUyScrkKADKSQUVAICkCKgAACQluRY/1KqBnx3Y8XNmTVRtfQBqRTHWP81QQQUAICkCKgAASdHir1LZ7dfhw4ZXbiAANa6YbUsgPyqoAAAkJdkKava/WDMTSsjt5b+/HE1NTZUeRlnUyvqnmQlTqZ3bL//95UoPAQBUUAEASIuACgBAUpJt8WdLtR1abj1dl7Kr3095Tc5PG/3N0ZUeQlHkWhsVqBwToiANKqgAACSlISLaKz2IQtRqtanQKmlmklRLS0sxh1N02VXaQt5rvVQ3Fi5cuMNjmWOX67h1Vf3u6lhXy/kDhUrt745c33HKx995peFOUgAA1CwBFQCApFTFJKlcMpNLstsk1TTJJ6Lnk56qUT2+50Jk2iTZ53e+x84xrk+5Wmv12kZOrZ0PtaxU3zcVVAAAklK1FdSM7OS+s4kllaCSRbk412pfIVWKUlQ2cv09q2IJFJsKKgAASRFQAQBIStW3+LPlavf39O5Jmd/v6TqTAIVIrX2e2niqVa6JkLWkp2te17pcmcJx2p4KKgAASampCmohLN0DAJCWug+oAEBxKersqNrWaq80LX4AAJKiggqQsMwkGpOTalNXa3lT/fKtnJZ7Ylnqk79VUAEASIqACgBAUrT42Sltxcoq1vHXOgRSkmkvV7qNXCrlmhCV736yj3Mhlxzk2k6pqaACAJCUmq2gVsNdOopVHWtsbIylS5dGRMTXv/71aG1tLcp2qR07O9cKPX9S/m7VouzjrbMBdEcpKrql/ntIBRUAgKQIqAAAJKVmW/wZWmFQGiZwQXFVw6VpPeFOSsVRL8dRBRUAgKTUfAUVSFuxuxy1Wn3K5u5SVLtaXV6q1pXz7xwVVAAAkiKgAgCQFC1+oKaUogVVD5cNAKREBRUAgKSooAJ0wUQugPISUAGAksueuZ9Zy7PWZvOXco3Seln/NEOLHwCApKigApSZ9UvZmezzw+UgVNrAzw6syH5VUAEASIqACgBAUrT4AaAGFTKpplyTlmppclSxJi/V0jEpBhVUAACSooIKAERE7mpgLVf2hg8bHr0aetX0eyxUpSZHZaigAgCQFAEVAICkaPEDQKJSWBO11tr+w4cNj3Ut67Z7LN87W9X63ZyWLVvW8fOCuQsiImL0N0dXZCwqqAAAJEUFFQBqSL5VvuxqYa1XBrPfX1t7W16vK3eVONe+K/m5VKpymqGCCgBAUgRUAACSosUPAFUgM2Eqe7JUT1rAtd7Wz5bdrh8+bHhev1Pu41PJiWeZyVGVbutnU0EFACApKqgAQE3Ld5JUKspRvc1eUiqlymmGCioAAEkRUAEASIoWPwBUkVx3l6qnCU/5KtcxKcV6suVYBzXFtn42FVQAAJKiggoARdbY2FjW/VXDxJ9y6+yYZD9e7OOW+vaylfsczWhtbc3rdQIqABTZ0qVLy7q/dS3ryrq/pqamsu6vEPkck/Vvr+/xfrKPRU8+h1zHtJSfa7nP0Yxjjjkmr9dp8QMAkBQVVAAospEjR5Z1f/Pnz+/4Od87JfXEy39/uSjbyR5rZpu5HuvptrO1tbd1VE4H7jcwejUUVqvLNbbuHvvuvL9ifa4vLH8hIiLGjBtTlO2VStUF1N69e8crr7wSn//852Pq1Klxww03dDz3mc98Jn75y1/GscceG4MGDYqtW7fGypUrY+bMmTFnzpwKjhp65nOf+1x873vfixEjRsRRRx0VjY2NcfDBB8fq1asrPTQgh3yvsyuFQgNXJeQaa7HGn892ejX0KurxKuWxL9a229vbI6Ky52g+qi6gXnLJJTF48OCcz+27775x4IEHxrx582LNmjXRp0+fOP3002P27NkxdOjQmDZtWplHC8VxwgknxKWXXhqvvfZavP7663HkkUdWekgAUDINEdFe6UHka7/99ouVK1fGDTfcED/96U93qKB25g9/+EOccsopMWDAgGhrq72Zjo2NjR0XO48cOTL5fxXRfXvttVds2bIlNm/eHFdccUX86le/KloF1fkDtaUca6Nmr/3ZE90ZY7777Gqbbe1tHZOPBjUNqkjFuav3UsrPbuBnB5Zs28XUrU/ly1/+crS3t8fo0aN3eO7cc8+N9vb2OP7444s1th1cf/318cYbb8T999/frd9btWpV7LrrrrHLLruUaGTUu8bGxnj99dfj9ddf327pjr322ivWrVsXzz77bPTqVfhfgu+9915s3ry5GEMFgOR1q8W/ZMmSWLNmTYwdOzYWLFiw3XNjx46NN998M5YtWxa77LJL7L777nlt85133snrdccee2yMHz8+Ro4c2XH9RGcaGxtjt912i/79+8fJJ58c559/fjz33HMqQ5RMa2trjB8/Pp599tm47rrr4oorroiIiJkzZ8aAAQNiwoQJ0dbWVpLvBkC5ZVf4ilVNLWTf1STXcSrXe6mWqmm2bl+Dev/998fll18ee+yxR7z//vsRse3az6985Stx3XXXRcS2auqsWbPy2l5DQ0Ner7vlllvi4YcfjmXLlsVBBx2009dOmTIlrr/++o7/Xrx4cZx//vl57QcKtXz58vjlL38ZP/zhD+P3v/997L///nHuuefGlClT4p///GdElOa7AQC1ptsBdc6cOXH11VfHOeecE/fcc09ERIwZMyb69OnT0Xr/85//HKeddlrRBjlhwoQ4/PDD45xzz
snr9b/97W9jxYoVsd9++8VZZ50V+++/f/Tr169o44HOXHvttXHWWWfF7Nmzo3///rFkyZK4+eabO54v9ncDAGpRtwPqG2+8EcuXL4+xY8d2BNSxY8fGc889F//6178iImL9+vWxfn337s6QaclnbN26NTZu3Bi77757/PznP48ZM2bE2rVr89rWmjVrYs2aNRER8dBDD8Xtt98eixcvjqFDh2rzU1JbtmyJiRMnxooVK+LDDz/coXJfyHcDoDtGjRoVERHr/7c8f9f0pN2f/fpc7e58n69k27+rMeZ6rtyXRVSjgpaZmjNnTtx0001xwAEHRN++feOEE06Iiy++uOP5xsbGGDBgQF7b2rBhQ0RETJ06Na699tqOx1etWhWHHHJITJ06NXbZZZd4+OGHO1r7Bx54YERsm4By0EEHxbp162LLli2d7mPevHnx3e9+N0466aT4y1/+0t23m7zW1taORaEF8Mo744wzIiKiX79+MWTIkFi1alXHc4V8N0rN+QNAagoKqA899FD8+te/jnPPPTf69esXH3/8cTz88MMdz48ZM6bb19nNmTNnu/vCfvjhhxERMXjw4Nh7773jtdde2+F3p02bFtOmTYvhw4fHK6+80uk+Mu39fINBNRIs0nD44YfHj3/847jnnnti+PDhcdddd8Xhhx/ecb12Id+NcnD+AMWwswpivlXD7lQXU5gw1dUYKjk5atmyZWXZTykUFFDfeeedWLRoUYwbNy4aGxvj8ccf327GcSHX2TU3N0dzc/MOj9988807rBjQ1NQUd9xxR9x7773x6KOPdvzevvvuGxs3btxhGxdccEG0tbXFX/+qpE7p9O7dO2bNmhXr1q2LKVOmxCGHHBIvvPBC3HjjjXHBBRdEhGtQASAfBd9Jas6cOfHII49ERMT06dO3e66Y19m99NJL8dJLL233WKbV/49//CMeffTRjsenTZsWJ554Yjz++OOxZs2a2HvvveMb3/hGHHfccXHzzTd3XCMLpfCjH/0ohg8fHqeeemps3rw5/va3v8VPfvKTuO6662LevHmxaNGigr8be+yxR1xyySUREXHiiSdGRMTkyZNj06ZNsWnTppg5c2ZR3wsAVFLBd5Lq06dPrF+/Pnr16hUDBw6Mjz76qMhD69xBBx0Uq1at2uFOUqeddlpceumlcdRRR8V+++0Xra2t8eqrr8Zdd90Vs2fPLtv4qD9HHnlkPP/883HrrbfGlClTOh7v1atXPPfcc3HAAQfEF77whfjPf/5T0PYz53wumeu1ATpTrglTO1PIxKBStMJTuJNUKVXjmqe5FFxBbWtri08++ST++Mc/ljWcRkSsXr065/V5ixcvjsWLF5d1LBCxrdKf605lbW1tMWLEiB5vv7NzHgBqUcH/bBg9enQ0NTXFnDlzijkeAADqXLdb/Mcdd1wcccQRMX369Ni4cWMcffTRJRoaAFBMCxcu7Pg5lbVDP63U4yp3i7/cs/hrpcXf7U/loosuiltvvTVaWlriO9/5TinGBABAHSt4khQAUL0qOXGqniqo5ZC93unob46u3ECKqPo/FQAAaoqACgBAUrT4AaDOpbROqhZ/99XKxKhs1f+pAABQU1RQAYCISKuSGuFOUl2pxcppRvV+KgAA1CQBFQCApGjxAwA7SKHdXwrZLf6WlpaIqOxdtbqrltv62VRQAQBISu9KDwAASE+mUpddSS3XUlBskz1hbNSoURUcSfmpoAIAkBQBFQCApJgkBQDkZeHChTs8Vm3t/lyTpDJSeS+Z1n69tfWzqaACQBU59thjY+bMmbFixYr4+OOPo71953WmpqamuO2222Lt2rXx4YcfRnNzc9x1111lGi0UxiQpAKgio0aNigsvvDBeffXV+Pe//x1Dhw7t9LUHHnhgPPvssxERcdttt8Vbb70VgwYNiuOOO65cw4WCaPEDQBVpamqK999/P1pbW+OWW26JyZMnR0NDQ87X/ulPf4rDDjssjj322Hj33XdLMp7stn8qLfKdSbXFX88z9nPR4geAIjvooIOivb290z890dLSEq2trV2+bujQoTFq1KiYMWNGvPvuu9G3b9/o3VvjlOrgTAWAInv77bdj3Lhx2z3Wp0+fuPHGG+Pjjz+OiIh+/frFrrvu2uW2tm7dGps2ber2GE477bSIiNiwYUMsXrw4Tj311Pjkk0/iiSeeiIsuuihWr17d7W3mkqvaVy13oRo+bHj0aui1XfWynFRNOyegAkCR/fe//40HHnhgu8d+85vfRP/+/eP000+PiIgf/OAHce2113a5rVWrVsUhhxzS7TEMGTIkIiLuuOOOeOGFF+Jb3/pWDB48OK655ppYvHhxHHHEEfHhhx92e7vVriEaYlDToI6fSZOACgAldt5558XFF18cl19+eSxZsiQiIubMmRNLly7t8ncLDZH9+/ePiIj169fH1772tY5LC9auXRsPPfRQfPvb34677767oG1Xs4aGBsG0CgioAFBCX/ziF+O2226LBx98MG688caOx5ubm6O5ublk+80E29/97nfbXfc6d+7cuO++++JLX/pSyQJq5japEdXT7o+InK3+QiZO5XvJgLZ+5wRUACiRPffcMx555JFYuXJlXHjhhds9t9tuu3VUOXdm69atsXHjxm7ve926bTPVN2zYsN3jbW1t8c4778Ree+3V7W1CuQioAFACDQ0N8cADD8See+4Zp5122g6t+qlTp5b0GtQXX3wxIiIOOOCA7R7v06dP7LvvvvH22293e5uFyK6mflquO1Nly7d6mV2xzPxOZ1XMnlQtuxpvsfdXzwRUACiBa665Js4444z46le/GqtWrdrh+VJfg7pkyZLYsGFDjB07Nn72s5/FRx99FBEREyZMiN69e8cTTzxR0HahHARUACiyYcOGxfTp0+Ppp5+OpqamGDt27HbPP/DAAwVfgzp48OA477zzIiLimGOOiYiIadOmRUTE6tWr4/7774+IiI8//ji+//3vx5w5c+Lpp5+O++67LwYPHhxTpkyJp59+OubPn9+Ttwgl5U5SAFBkJ598csds/Vw6u/NTT7e9ZMmSOOWUU7Z7bMyYMXHllVfGYYcdFps2bYq5c+fG1VdfHZs3by54DKWkjU6EgAoAJERAJUJABQAgMb0qPQAAAMgmoAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUgRUAACSIqACAJAUARUAgKQIqAAAJEVABQAgKQIqAABJEVABAEiKgAoAQFIEVAAAkiKgAgCQFAEVAICkCKgAACRFQAUAICkCKgAASRFQAQBIioAKAEBSBFQAAJIioAIAkBQBFQCApAioAAAkRUAFACApAioAAEkRUAEASIqACgBAUv4PhV94MdAk
yuIAAAAASUVORK5CYII=", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "from nilearn.plotting import plot_roi, plot_anat, plot_epi\n", "\n", "avg_mask=nib.load(f'masks/{sub}_{session}_brain.nii.gz')\n", "\n", "# mask info\n", "dimsize=avg_mask.header.get_zooms()\n", "affine_mat = avg_mask.affine\n", "brain=avg_mask.get_fdata()\n", "xyz=brain.shape #xyz dimensionality of brain mask and epi data\n", "\n", "print('Mask dimensions:', dimsize)\n", "print('')\n", "print('Affine:')\n", "print(affine_mat)\n", "print('')\n", "print(f'There are {int(np.sum(brain))} voxels in the included brain mask\\n')\n", "\n", "roi = nib.load(f'masks/{sub}_nsdgeneral.nii.gz')\n", "\n", "plot_roi(roi, bg_img=avg_mask)" ] }, { "cell_type": "code", "execution_count": 16, "id": "f7f2e9dd-88af-4ca9-bd80-17cbe429b6ce", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "total voxels (whole brain) = 194410\n", "nsdgeneral voxels = 25069\n" ] } ], "source": [ "avg_mask = avg_mask.get_fdata().flatten()\n", "print(f\"total voxels (whole brain) = {int(avg_mask.sum())}\")\n", "\n", "roi = roi.get_fdata()\n", "roi = roi.flatten()\n", "roi = roi[avg_mask.astype(bool)]\n", "roi[np.isnan(roi)] = 0\n", "roi = roi.astype(bool)\n", "print(f\"nsdgeneral voxels = {roi.sum()}\")" ] }, { "cell_type": "markdown", "id": "3bf38b1b-1270-4f65-bc01-07f90343963d", "metadata": {}, "source": [ "### ROI voxel exclusion" ] }, { "cell_type": "code", "execution_count": 17, "id": "de856487-6f6b-4971-8e84-8b30c9a9e943", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "vox before ROI exclusion: (1008, 194410)\n", "vox after ROI exclusion: (1008, 25069)\n" ] } ], "source": [ "# ROI masking?\n", "print(f\"vox before ROI exclusion: {vox.shape}\")\n", "vox = vox[:,roi]\n", "print(f\"vox after ROI exclusion: {vox.shape}\")\n", "\n", "if np.any(np.isnan(vox)):\n", " print(\"NaNs found! 
Removing voxels...\")\n", " x,y = np.where(np.isnan(vox))\n", " vox = vox[:,np.setdiff1d(np.arange(vox.shape[-1]), y)]" ] }, { "cell_type": "markdown", "id": "903c4f38-4f6e-44d7-8186-44c47c77507a", "metadata": {}, "source": [ "## Reliability calculation" ] }, { "cell_type": "markdown", "id": "288e757f-fe51-4c3b-bb3c-e78b18c755c5", "metadata": {}, "source": [ "### Calculate reliability (corr between first and second presentation of same image) for every voxel" ] }, { "cell_type": "code", "execution_count": null, "id": "765ab07f-dbb3-4bdd-8de1-57fe85231d82", "metadata": {}, "outputs": [], "source": [ "vox_pairs = utils.zscore(vox[pairs])\n", "rels = np.full(vox.shape[-1],np.nan)\n", "for v in tqdm(range(vox.shape[-1])):\n", " rels[v] = np.corrcoef(vox_pairs[:,0,v], vox_pairs[:,1,v])[1,0]\n", "print(\"rels\", rels.shape)\n", "assert np.sum(np.all(np.isnan(rels))) == 0" ] }, { "cell_type": "markdown", "id": "b646ecf4-dd03-4352-abcf-6c6ccb54d96b", "metadata": {}, "source": [ "### Create representational similarity matrix" ] }, { "cell_type": "code", "execution_count": null, "id": "81aebd28-a66b-43fd-8123-fbcd99851383", "metadata": {}, "outputs": [], "source": [ "# creating img x vox x repetitions matrix | shape=(150, 18419, 2)\n", "vox0 = np.zeros((len(pairs), vox.shape[-1], 2))\n", "for ipair, pair in enumerate(tqdm(pairs)):\n", " pair = pair[:2] # to keep things consistent, just using the first two repeats\n", " i,j = pair\n", " vox0[ipair, :, :] = vox[pair].T\n", "vox_avg = vox0.mean(-1) # average across the repetitions" ] }, { "cell_type": "code", "execution_count": null, "id": "30fad9fc-5399-4820-8a8a-8b63fec5d371", "metadata": {}, "outputs": [], "source": [ "# Masking RDM for each reliability threshold\n", "r_thresholds = np.array([.0, .1, .2, .3])\n", "rdm = np.zeros((len(r_thresholds), len(pairs), len(pairs))) \n", "for ir_thresh, r_thresh in enumerate(r_thresholds):\n", " print(f\"reliability threshold = {r_thresh}\")\n", " for i in tqdm(range(len(pairs))):\n", " for j in range(len(pairs)):\n", " rdm[ir_thresh,i,j] = np.corrcoef(vox_avg[i,rels>r_thresh], \n", " vox_avg[j,rels>r_thresh])[0,1]\n", "# rdm is shape (4, 150, 150)" ] }, { "cell_type": "code", "execution_count": null, "id": "feac20e6-d50f-466f-98c9-eab730db65e9", "metadata": {}, "outputs": [], "source": [ "reliability_threshold_to_visualize = .1\n", "plt.figure(figsize=(4,4))\n", "plt.imshow(rdm[np.where(r_thresholds==reliability_threshold_to_visualize)[0].item()], clim=(-1,1))\n", "plt.colorbar(shrink=0.8)\n", "plt.title(f\"{sub}_{session}\\nreliability threshold={reliability_threshold_to_visualize}\\n\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "4a4575dc-a6b1-449b-bcb8-31b3eef348ba", "metadata": {}, "outputs": [], "source": [ "for thresh in range(rdm.shape[0]):\n", " for img in range(rdm.shape[1]):\n", " assert np.isclose(rdm[thresh, img, img], 1)" ] }, { "cell_type": "code", "execution_count": null, "id": "ef19ef06-afc2-408d-b870-fe4df3473f90", "metadata": {}, "outputs": [], "source": [ "vox0.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "2fafa9cf-0638-4a4d-9743-70e15bbc8801", "metadata": {}, "outputs": [], "source": [ "r=0\n", "for isamp, samp in enumerate(vox0):\n", " while r==isamp:\n", " r = np.random.randint(len(vox0))\n", " if isamp==0:\n", " same_corrs = np.array([np.corrcoef(samp[:,0], samp[:,1])[0,1]])\n", " diff_corrs = np.array([np.corrcoef(samp[:,0], vox0[r][:,0])[0,1]])\n", " else:\n", " same_corrs = np.append(same_corrs, np.corrcoef(samp[:,0], 
samp[:,1])[0,1])\n", " diff_corrs = np.append(diff_corrs, np.corrcoef(samp[:,0], vox0[r][:,0])[0,1])\n", "\n", "plt.figure(figsize=(5,4))\n", "plt.title(f\"{sub}_{session} same/diff Pearson corr.\")\n", "plt.plot(np.sort(same_corrs),c='blue',label='same')\n", "plt.plot(np.sort(diff_corrs),c='cyan',label='diff')\n", "plt.axhline(0,c='k',ls='--')\n", "plt.legend()\n", "plt.xlabel(\"sample\")\n", "plt.ylabel(\"Pearson R\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "31646431-10ac-4820-ba5d-c35f1e104557", "metadata": {}, "outputs": [], "source": [ "vox_pairs = utils.zscore(vox[pairs])\n", "plt.figure(figsize=(5,4))\n", "plt.title(f\"{sub}_{session} same minus diff difference Pearson corr.\")\n", "plt.plot(np.sort(same_corrs - diff_corrs),c='cyan',label='difference')\n", "plt.axhline(0,c='k',ls='--')\n", "plt.legend()\n", "plt.xlabel(\"sample\")\n", "plt.ylabel(\"Pearson R\")\n", "plt.show()" ] }, { "cell_type": "markdown", "id": "a8866ce2-1cf2-459e-aa81-dec26a3dcd33", "metadata": {}, "source": [ "# Training MindEye" ] }, { "cell_type": "code", "execution_count": null, "id": "248ce3a0-f03b-4c97-aee1-920432664ae1", "metadata": {}, "outputs": [], "source": [ "# Reliability thresholding?\n", "print(f\"\\nvox before reliability thresholding: {vox.shape}\")\n", "vox = vox[:,rels>.2]\n", "print(f\"\\nvox after reliability thresholding: {vox.shape}\")" ] }, { "cell_type": "code", "execution_count": null, "id": "b80aeb2d-6d53-431c-90ed-658dca7ecebd", "metadata": {}, "outputs": [], "source": [ "print(images.shape)\n", "print(vox.shape)\n", "assert len(images) == len(vox)" ] }, { "cell_type": "code", "execution_count": null, "id": "8f554db1-f7cd-40d2-ab62-5d1e282c2bc8", "metadata": {}, "outputs": [], "source": [ "utils.seed_everything(0)\n", "\n", "if train_test_split == 'orig':\n", " # train = all images except images that were repeated\n", " # test = average of the same-image presentations\n", " imageTrain = np.arange(len(images))\n", " train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()])\n", " test_image_indices = pairs\n", " print(len(train_image_indices), len(test_image_indices))\n", "elif train_test_split == 'MST':\n", " # non-MST images are the train split\n", " # MST images are the test split\n", " train_image_indices = np.where(MST_images==False)[0]\n", " test_image_indices = np.where(MST_images==True)[0]\n", " print(len(train_image_indices), len(test_image_indices))\n", "else:\n", " raise Exception(\"invalid train_test_split\")\n", " \n", "for i in train_image_indices:\n", " assert i not in test_image_indices" ] }, { "cell_type": "code", "execution_count": null, "id": "590f2b4b-db7c-42a1-bfd0-cc578e6af988", "metadata": {}, "outputs": [], "source": [ "train_mean = np.mean(vox[train_image_indices],axis=0)\n", "train_std = np.std(vox[train_image_indices],axis=0)\n", "\n", "vox = utils.zscore(vox,train_mean=train_mean,train_std=train_std)\n", "print(\"voxels have been zscored\")\n", "print(vox[:,0].mean(), vox[:,0].std())\n", "print(\"vox\", vox.shape)\n", "\n", "images = torch.Tensor(images)\n", "vox = torch.Tensor(vox)" ] }, { "cell_type": "code", "execution_count": null, "id": "cc5d2e32-6027-4a19-bef4-5ca068db35bb", "metadata": {}, "outputs": [], "source": [ "### Multi-GPU config ###\n", "from accelerate import Accelerator, DeepSpeedPlugin\n", "\n", "local_rank = os.getenv('RANK')\n", "if local_rank is None: \n", " local_rank = 0\n", "else:\n", " local_rank = int(local_rank)\n", "print(\"LOCAL RANK \", local_rank) \n", 
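"# note: data_type set below is an initial default; the next cell overrides it\n",
"# to bfloat16 / float16 / float32 based on accelerator.mixed_precision.\n",
"# batch_size is the per-device batch size (global_batch_size = batch_size * num_devices\n",
"# in the next cell); it is further divided by len(subj_list) before the dataloaders are built.\n",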
"\n", "data_type = torch.float32 # change depending on your mixed_precision\n", "\n", "accelerator = Accelerator(split_batches=False)\n", "batch_size = 8 " ] }, { "cell_type": "code", "execution_count": null, "id": "b767ab6f-d4a9-47a5-b3bf-f56bf6760c0c", "metadata": {}, "outputs": [], "source": [ "print(\"PID of this process =\",os.getpid())\n", "device = accelerator.device\n", "print(\"device:\",device)\n", "world_size = accelerator.state.num_processes\n", "distributed = not accelerator.state.distributed_type == 'NO'\n", "num_devices = torch.cuda.device_count()\n", "global_batch_size = batch_size * num_devices\n", "print(\"global_batch_size\", global_batch_size)\n", "if num_devices==0 or not distributed: num_devices = 1\n", "num_workers = num_devices\n", "print(accelerator.state)\n", "\n", "# set data_type to match your mixed precision (automatically set based on deepspeed config)\n", "if accelerator.mixed_precision == \"bf16\":\n", " data_type = torch.bfloat16\n", "elif accelerator.mixed_precision == \"fp16\":\n", " data_type = torch.float16\n", "else:\n", " data_type = torch.float32\n", "\n", "print(\"distributed =\",distributed, \"num_devices =\", num_devices, \"local rank =\", local_rank, \"world size =\", world_size, \"data_type =\", data_type)\n", "print = accelerator.print # only print if local_rank=0" ] }, { "cell_type": "markdown", "id": "9018b82b-c054-4463-9527-4b0c2a75bda6", "metadata": { "tags": [] }, "source": [ "## Configurations" ] }, { "cell_type": "code", "execution_count": 6, "id": "2b61fec7-72a0-4b67-86da-1375f1d9fbd3", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "model_name: sub-001_ses-02_bs24_MST_rishab_MSTsplit\n" ] }, { "ename": "NameError", "evalue": "name 'batch_size' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[6], line 10\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_name:\u001b[39m\u001b[38;5;124m\"\u001b[39m, model_name)\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# global_batch_size and batch_size should already be defined in the above cells\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m# other variables can be specified in the following string:\u001b[39;00m\n\u001b[1;32m 8\u001b[0m jupyter_args \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;124m --model_name=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[0;32m---> 10\u001b[0m \u001b[38;5;124m --no-multi_subject --subj=1 --batch_size=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[43mbatch_size\u001b[49m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124m --hidden_dim=1024 --clip_scale=1. 
\u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124m --no-blurry_recon --blur_scale=.5 \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;124m --no-use_prior --prior_scale=30 \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 14\u001b[0m \u001b[38;5;124m --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=10 --no-use_image_aug \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;124m --ckpt_interval=999 --no-ckpt_saving --new_test \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 16\u001b[0m \u001b[38;5;124m --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;28mprint\u001b[39m(jupyter_args)\n\u001b[1;32m 18\u001b[0m jupyter_args \u001b[38;5;241m=\u001b[39m jupyter_args\u001b[38;5;241m.\u001b[39msplit()\n", "\u001b[0;31mNameError\u001b[0m: name 'batch_size' is not defined" ] } ], "source": [ "# if running this interactively, can specify jupyter_args here for argparser to use\n", "if utils.is_interactive():\n", " model_name = f\"sub-001_{session}_bs24_MST_rishab_{train_test_split}split\"\n", " print(\"model_name:\", model_name)\n", " \n", " # global_batch_size and batch_size should already be defined in the above cells\n", " # other variables can be specified in the following string:\n", " jupyter_args = f\"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \\\n", " --model_name={model_name} \\\n", " --no-multi_subject --subj=1 --batch_size={batch_size} \\\n", " --hidden_dim=1024 --clip_scale=1. \\\n", " --no-blurry_recon --blur_scale=.5 \\\n", " --no-use_prior --prior_scale=30 \\\n", " --n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=10 --no-use_image_aug \\\n", " --ckpt_interval=999 --no-ckpt_saving --new_test \\\n", " --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep\"\n", " print(jupyter_args)\n", " jupyter_args = jupyter_args.split()" ] }, { "cell_type": "code", "execution_count": null, "id": "2028bdf0-2f41-46d9-b6e7-86b870dbf16c", "metadata": {}, "outputs": [], "source": [ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n", "parser.add_argument(\n", " \"--model_name\", type=str, default=\"testing\",\n", " help=\"name of model, used for ckpt saving and wandb logging (if enabled)\",\n", ")\n", "parser.add_argument(\n", " \"--data_path\", type=str, default=\"/weka/proj-fmri/shared/natural-scenes-dataset\",\n", " help=\"Path to where NSD data is stored / where to download it to\",\n", ")\n", "parser.add_argument(\n", " \"--subj\",type=int, default=1, choices=[1,2,3,4,5,6,7,8],\n", " help=\"Validate on which subject?\",\n", ")\n", "parser.add_argument(\n", " \"--multisubject_ckpt\", type=str, default=None,\n", " help=\"Path to pre-trained multisubject model to finetune a single subject from. 
multisubject must be False.\",\n", ")\n", "parser.add_argument(\n", " \"--num_sessions\", type=int, default=0,\n", " help=\"Number of training sessions to include (if multi_subject, this variable doesnt matter)\",\n", ")\n", "parser.add_argument(\n", " \"--use_prior\",action=argparse.BooleanOptionalAction,default=False,\n", " help=\"whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)\",\n", ")\n", "parser.add_argument(\n", " \"--batch_size\", type=int, default=32,\n", " help=\"Batch size can be increased by 10x if only training v2c and not diffusion diffuser\",\n", ")\n", "parser.add_argument(\n", " \"--wandb_log\",action=argparse.BooleanOptionalAction,default=False,\n", " help=\"whether to log to wandb\",\n", ")\n", "parser.add_argument(\n", " \"--resume_from_ckpt\",action=argparse.BooleanOptionalAction,default=False,\n", " help=\"if not using wandb and want to resume from a ckpt\",\n", ")\n", "parser.add_argument(\n", " \"--wandb_project\",type=str,default=\"stability\",\n", " help=\"wandb project name\",\n", ")\n", "parser.add_argument(\n", " \"--mixup_pct\",type=float,default=.33,\n", " help=\"proportion of way through training when to switch from BiMixCo to SoftCLIP\",\n", ")\n", "parser.add_argument(\n", " \"--low_mem\",action=argparse.BooleanOptionalAction,default=False,\n", " help=\"whether to preload images to cpu to speed things up but consume more memory\",\n", ")\n", "parser.add_argument(\n", " \"--blurry_recon\",action=argparse.BooleanOptionalAction,default=True,\n", " help=\"whether to output blurry reconstructions\",\n", ")\n", "parser.add_argument(\n", " \"--blur_scale\",type=float,default=.5,\n", " help=\"multiply loss from blurry recons by this number\",\n", ")\n", "parser.add_argument(\n", " \"--clip_scale\",type=float,default=1.,\n", " help=\"multiply contrastive loss by this number\",\n", ")\n", "parser.add_argument(\n", " \"--prior_scale\",type=float,default=30,\n", " help=\"multiply diffusion prior loss by this\",\n", ")\n", "parser.add_argument(\n", " \"--use_image_aug\",action=argparse.BooleanOptionalAction,default=True,\n", " help=\"whether to use image augmentation\",\n", ")\n", "parser.add_argument(\n", " \"--num_epochs\",type=int,default=120,\n", " help=\"number of epochs of training\",\n", ")\n", "parser.add_argument(\n", " \"--multi_subject\",action=argparse.BooleanOptionalAction,default=False,\n", ")\n", "parser.add_argument(\n", " \"--new_test\",action=argparse.BooleanOptionalAction,default=True,\n", ")\n", "parser.add_argument(\n", " \"--n_blocks\",type=int,default=2,\n", ")\n", "parser.add_argument(\n", " \"--hidden_dim\",type=int,default=1024,\n", ")\n", "parser.add_argument(\n", " \"--seq_past\",type=int,default=0,\n", ")\n", "parser.add_argument(\n", " \"--seq_future\",type=int,default=0,\n", ")\n", "parser.add_argument(\n", " \"--lr_scheduler_type\",type=str,default='cycle',choices=['cycle','linear'],\n", ")\n", "parser.add_argument(\n", " \"--ckpt_saving\",action=argparse.BooleanOptionalAction,default=True,\n", ")\n", "parser.add_argument(\n", " \"--ckpt_interval\",type=int,default=5,\n", " help=\"save backup ckpt and reconstruct every x epochs\",\n", ")\n", "parser.add_argument(\n", " \"--seed\",type=int,default=42,\n", ")\n", "parser.add_argument(\n", " \"--max_lr\",type=float,default=3e-4,\n", ")\n", "\n", "if utils.is_interactive():\n", " args = parser.parse_args(jupyter_args)\n", "else:\n", " args = parser.parse_args()\n", "\n", "# create global variables without the args prefix\n", "for attribute_name 
in vars(args).keys():\n", " globals()[attribute_name] = getattr(args, attribute_name)\n", " \n", "# seed all random functions\n", "utils.seed_everything(seed)\n", "\n", "outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}')\n", "if not os.path.exists(outdir) and ckpt_saving:\n", " os.makedirs(outdir,exist_ok=True)\n", "\n", "cache_dir = \"/scratch/gpfs/ri4541/MindEyeV2/src\"\n", "\n", " \n", "if use_image_aug or blurry_recon:\n", " import kornia\n", " import kornia.augmentation as K\n", " from kornia.augmentation.container import AugmentationSequential\n", "if use_image_aug:\n", " img_augment = AugmentationSequential(\n", " kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3),\n", " same_on_batch=False,\n", " data_keys=[\"input\"],\n", " )\n", " # Define the blurring augmentations\n", " blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.)\n", " \n", "if multi_subject:\n", " subj_list = np.arange(1,9)\n", " subj_list = subj_list[subj_list != subj]\n", "else:\n", " subj_list = [subj]\n", "\n", "print(\"subj_list\", subj_list, \"num_sessions\", num_sessions)" ] }, { "cell_type": "markdown", "id": "42d13c25-1369-4c49-81d4-83d713586096", "metadata": { "tags": [] }, "source": [ "## Prep data, models, and dataloaders" ] }, { "cell_type": "markdown", "id": "1c023f24-5233-4a15-a2f5-78487b3a8546", "metadata": {}, "source": [ "### Creating wds dataloader, preload betas and all 73k possible images" ] }, { "cell_type": "code", "execution_count": 7, "id": "78dc192e-40dd-4d84-96c8-1c6b78fcb5bb", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_ses-02_bs24_MST_rishab_MSTsplit\n" ] } ], "source": [ "# save MST_ID for 2-alternative forced-choice retrieval evaluation \n", "if 'MST' in model_name:\n", " eval_dir = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\"\n", " print(eval_dir)\n", " # Saving ##\n", " if not os.path.exists(eval_dir):\n", " os.mkdir(eval_dir)\n", "\n", " np.save(f\"{eval_dir}/{model_name}_MST_ID.npy\", MST_ID)\n", " np.save(f\"{eval_dir}/{model_name}_MST_pairmate_indices.npy\", MST_pairmate_indices)" ] }, { "cell_type": "code", "execution_count": null, "id": "aefe7c27-ab39-4b2c-90f4-480f4087b7ab", "metadata": {}, "outputs": [], "source": [ "def my_split_by_node(urls): return urls\n", "num_voxels_list = []\n", "\n", "if multi_subject:\n", " nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30])\n", " num_samples_per_epoch = (750*40) // num_devices \n", "else:\n", " # num_samples_per_epoch = (750*num_sessions) // num_devices \n", " num_samples_per_epoch = len(train_image_indices)\n", "\n", "print(\"dividing batch size by subj_list, which will then be concatenated across subj during training...\") \n", "batch_size = batch_size // len(subj_list)\n", "\n", "num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list))\n", "\n", "print(\"batch_size =\", batch_size, \"num_iterations_per_epoch =\",num_iterations_per_epoch, \"num_samples_per_epoch =\",num_samples_per_epoch)" ] }, { "cell_type": "code", "execution_count": null, "id": "e1942b0e-1223-40e6-b543-2f7ff2e8ebcd", "metadata": { "tags": [] }, "outputs": [], "source": [ "train_data = {}\n", "train_dl = {}\n", "\n", "train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices))\n", "\n", "test_data = 
torch.utils.data.TensorDataset(torch.tensor(test_image_indices))" ] }, { "cell_type": "code", "execution_count": null, "id": "81084834-035f-4465-ad59-59e6b806a2f5", "metadata": {}, "outputs": [], "source": [ "num_voxels = {}\n", "voxels = {}\n", "for s in subj_list:\n", " print(f\"Training with {num_sessions} sessions\")\n", " train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)\n", "\n", " num_voxels_list.append(vox[0].shape[-1])\n", " num_voxels[f'subj0{s}'] = vox[0].shape[-1]\n", " voxels[f'subj0{s}'] = vox\n", " print(f\"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}\")\n", "\n", "print(\"Loaded all subj train dls and vox!\\n\")\n", "\n", "# Validate only on one subject\n", "if multi_subject: \n", " subj = subj_list[0] # cant validate on the actual held out person so picking first in subj_list\n", "test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True)\n", "\n", "print(f\"Loaded test dl for subj{subj}!\\n\")" ] }, { "cell_type": "markdown", "id": "10ec4517-dbdf-4ece-98f6-4714d5de4e15", "metadata": {}, "source": [ "## Load models" ] }, { "cell_type": "markdown", "id": "48d6160e-1ee8-4da7-a755-9dbb452a6fa5", "metadata": {}, "source": [ "### CLIP image embeddings model" ] }, { "cell_type": "code", "execution_count": null, "id": "b0420dc0-199e-4c1a-857d-b1747058b467", "metadata": {}, "outputs": [], "source": [ "## USING OpenCLIP ViT-bigG ###\n", "sys.path.append('generative_models/')\n", "import sgm\n", "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder\n", "# from generative_models.sgm.models.diffusion import DiffusionEngine\n", "# from omegaconf import OmegaConf\n", "\n", "try:\n", " print(clip_img_embedder)\n", "except:\n", " clip_img_embedder = FrozenOpenCLIPImageEmbedder(\n", " arch=\"ViT-bigG-14\",\n", " version=\"laion2b_s39b_b160k\",\n", " output_tokens=True,\n", " only_tokens=True,\n", " cache_dir=cache_dir\n", " )\n", " clip_img_embedder.to(device)\n", "clip_seq_dim = 256\n", "clip_emb_dim = 1664\n", "\n", "# ## USING OPEN AI CLIP ViT-L ###\n", "# import clip\n", "# try:\n", "# print(clip_model)\n", "# except:\n", "# clip_model, preprocess = clip.load(\"ViT-L/14\", device=device)\n", "# preprocess = transforms.Compose([\n", "# transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR),\n", "# transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n", "# std=[0.26862954, 0.26130258, 0.27577711]),\n", "# ])\n", "# def clip_img_embedder(image):\n", "# preproc_img = preprocess(image)\n", "# return clip_model.encode_image(preproc_img)\n", "# clip_seq_dim = 1\n", "# clip_emb_dim = 768" ] }, { "cell_type": "markdown", "id": "260e5e4a-f697-4b2c-88fc-01f6a54886c0", "metadata": {}, "source": [ "### MindEye modules" ] }, { "cell_type": "code", "execution_count": null, "id": "c44c271b-173f-472e-b059-a2eda0f4c4c5", "metadata": {}, "outputs": [], "source": [ "class MindEyeModule(nn.Module):\n", " def __init__(self):\n", " super(MindEyeModule, self).__init__()\n", " def forward(self, x):\n", " return x\n", " \n", "model = MindEyeModule()\n", "model" ] }, { "cell_type": "code", "execution_count": null, "id": "038a5d61-4769-40b9-a004-f4e7b5b38bb0", "metadata": {}, "outputs": [], "source": [ "class RidgeRegression(torch.nn.Module):\n", " # make sure to add weight_decay when initializing optimizer\n", " def __init__(self, input_sizes, out_features, seq_len=1): \n", " super(RidgeRegression, 
self).__init__()\n", " self.seq_len = seq_len\n", " self.out_features = out_features\n", " self.linears = torch.nn.ModuleList([\n", " torch.nn.Linear(input_size, out_features) for input_size in input_sizes\n", " ])\n", " def forward(self, x, subj_idx=0):\n", " out = torch.cat([self.linears[subj_idx](x[:,seq]).unsqueeze(1) for seq in range(self.seq_len)], dim=1)\n", " return out\n", " \n", "model.ridge = RidgeRegression(num_voxels_list, out_features=hidden_dim)\n", "utils.count_params(model.ridge)\n", "utils.count_params(model)\n", "\n", "# test on subject 1 with fake data\n", "b = torch.randn((2,1,num_voxels_list[0]))\n", "print(b.shape, model.ridge(b,0).shape)" ] }, { "cell_type": "code", "execution_count": null, "id": "7b8de65a-6d3b-4248-bea9-9b6f4d562321", "metadata": {}, "outputs": [], "source": [ "from functools import partial\n", "from diffusers.models.vae import Decoder\n", "class BrainNetwork(nn.Module):\n", " def __init__(self, h=4096, in_dim=15724, out_dim=768, seq_len=1, n_blocks=n_blocks, drop=.15, \n", " clip_size=768):\n", " super().__init__()\n", " self.seq_len = seq_len\n", " self.h = h\n", " self.clip_size = clip_size\n", " \n", " self.mixer_blocks1 = nn.ModuleList([\n", " self.mixer_block1(h, drop) for _ in range(n_blocks)\n", " ])\n", " self.mixer_blocks2 = nn.ModuleList([\n", " self.mixer_block2(seq_len, drop) for _ in range(n_blocks)\n", " ])\n", " \n", " # Output linear layer\n", " self.backbone_linear = nn.Linear(h * seq_len, out_dim, bias=True) \n", " if clip_scale>0:\n", " self.clip_proj = self.projector(clip_size, clip_size, h=clip_size)\n", " \n", " def projector(self, in_dim, out_dim, h=2048):\n", " return nn.Sequential(\n", " nn.LayerNorm(in_dim),\n", " nn.GELU(),\n", " nn.Linear(in_dim, h),\n", " nn.LayerNorm(h),\n", " nn.GELU(),\n", " nn.Linear(h, h),\n", " nn.LayerNorm(h),\n", " nn.GELU(),\n", " nn.Linear(h, out_dim)\n", " )\n", " \n", " def mlp(self, in_dim, out_dim, drop):\n", " return nn.Sequential(\n", " nn.Linear(in_dim, out_dim),\n", " nn.GELU(),\n", " nn.Dropout(drop),\n", " nn.Linear(out_dim, out_dim),\n", " )\n", " \n", " def mixer_block1(self, h, drop):\n", " return nn.Sequential(\n", " nn.LayerNorm(h),\n", " self.mlp(h, h, drop), # Token mixing\n", " )\n", "\n", " def mixer_block2(self, seq_len, drop):\n", " return nn.Sequential(\n", " nn.LayerNorm(seq_len),\n", " self.mlp(seq_len, seq_len, drop) # Channel mixing\n", " )\n", " \n", " def forward(self, x):\n", " # make empty tensors\n", " c,b = torch.Tensor([0.]), torch.Tensor([[0.],[0.]])\n", " \n", " # Mixer blocks\n", " residual1 = x\n", " residual2 = x.permute(0,2,1)\n", " for block1, block2 in zip(self.mixer_blocks1,self.mixer_blocks2):\n", " x = block1(x) + residual1\n", " residual1 = x\n", " x = x.permute(0,2,1)\n", " \n", " x = block2(x) + residual2\n", " residual2 = x\n", " x = x.permute(0,2,1)\n", " \n", " x = x.reshape(x.size(0), -1)\n", " backbone = self.backbone_linear(x).reshape(len(x), -1, self.clip_size)\n", " if clip_scale>0:\n", " c = self.clip_proj(backbone)\n", " \n", " return backbone, c, b\n", "\n", "model.backbone = BrainNetwork(h=hidden_dim, in_dim=hidden_dim, seq_len=1, \n", " clip_size=clip_emb_dim, out_dim=clip_emb_dim*clip_seq_dim)\n", "utils.count_params(model.backbone)\n", "utils.count_params(model)\n", "\n", "# test that the model works on some fake data\n", "b = torch.randn((2,1,hidden_dim))\n", "print(\"b.shape\",b.shape)\n", "\n", "backbone_, clip_, blur_ = model.backbone(b)\n", "print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape)" ] }, { 
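"cell_type": "markdown", "id": "ad3e1f10-shape-check-note", "metadata": {}, "source": [ "A minimal end-to-end shape check (a sketch, assuming `num_voxels_list`, `hidden_dim`, `clip_seq_dim`, and `clip_emb_dim` from the cells above): chain `model.ridge` into `model.backbone` on fake voxel data to confirm the mapping from voxels to `hidden_dim` and then to CLIP token embeddings of shape `(batch, clip_seq_dim, clip_emb_dim)`." ] }, { "cell_type": "code", "execution_count": null, "id": "bd4f2a21-shape-check", "metadata": {}, "outputs": [], "source": [ "# end-to-end shape check on fake data: voxels -> ridge -> backbone (subject index 0)\n", "# mirrors the per-module tests above; everything stays on CPU at this point\n", "b = torch.randn((2, 1, num_voxels_list[0]))             # (batch, seq_len, num_voxels)\n", "voxel_ridge_ = model.ridge(b, 0)                        # (batch, seq_len, hidden_dim)\n", "backbone_, clip_, blur_ = model.backbone(voxel_ridge_)  # (batch, clip_seq_dim, clip_emb_dim)\n", "print(voxel_ridge_.shape, backbone_.shape, clip_.shape)\n", "assert backbone_.shape == (2, clip_seq_dim, clip_emb_dim)" ] }, { 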
"cell_type": "markdown", "id": "b397c0d7-52a3-4153-823b-c27d2eb3eeba", "metadata": {}, "source": [ "### Adding diffusion prior + unCLIP if use_prior=True" ] }, { "cell_type": "code", "execution_count": null, "id": "69965344-9346-4592-9cc5-e537e31d5fce", "metadata": { "tags": [] }, "outputs": [], "source": [ "if use_prior:\n", " from models import *\n", "\n", " # setup diffusion prior network\n", " out_dim = clip_emb_dim\n", " depth = 6\n", " dim_head = 52\n", " heads = clip_emb_dim//52 # heads * dim_head = clip_emb_dim\n", " timesteps = 100\n", "\n", " prior_network = VersatileDiffusionPriorNetwork(\n", " dim=out_dim,\n", " depth=depth,\n", " dim_head=dim_head,\n", " heads=heads,\n", " causal=False,\n", " num_tokens = clip_seq_dim,\n", " learned_query_mode=\"pos_emb\"\n", " )\n", "\n", " model.diffusion_prior = BrainDiffusionPrior(\n", " net=prior_network,\n", " image_embed_dim=out_dim,\n", " condition_on_text_encodings=False,\n", " timesteps=timesteps,\n", " cond_drop_prob=0.2,\n", " image_embed_scale=None,\n", " )\n", " \n", " utils.count_params(model.diffusion_prior)\n", " utils.count_params(model)" ] }, { "cell_type": "markdown", "id": "ec25271a-2209-400c-8026-df3b8ddc1eef", "metadata": {}, "source": [ "### Setup optimizer / lr / ckpt saving" ] }, { "cell_type": "code", "execution_count": null, "id": "e14d0482-dc42-43b9-9ce1-953c32f2c9c1", "metadata": {}, "outputs": [], "source": [ "no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n", "\n", "opt_grouped_parameters = [\n", " {'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2},\n", " {'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},\n", " {'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},\n", "]\n", "# model.backbone.requires_grad_(False)\n", "\n", "if use_prior:\n", " opt_grouped_parameters.extend([\n", " {'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},\n", " {'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n", " ])\n", "\n", "optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr)\n", "\n", "if lr_scheduler_type == 'linear':\n", " lr_scheduler = torch.optim.lr_scheduler.LinearLR(\n", " optimizer,\n", " total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)),\n", " last_epoch=-1\n", " )\n", "elif lr_scheduler_type == 'cycle':\n", " if num_iterations_per_epoch==0:\n", " num_iterations_per_epoch=1\n", " total_steps=int(np.floor(num_epochs*num_iterations_per_epoch))\n", " print(\"total_steps\", total_steps)\n", " lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(\n", " optimizer, \n", " max_lr=max_lr,\n", " total_steps=total_steps,\n", " final_div_factor=1000,\n", " last_epoch=-1, pct_start=2/num_epochs\n", " )\n", " \n", "def save_ckpt(tag):\n", " ckpt_path = outdir+f'/{tag}.pth'\n", " if accelerator.is_main_process:\n", " unwrapped_model = accelerator.unwrap_model(model)\n", " torch.save({\n", " 'epoch': epoch,\n", " 'model_state_dict': unwrapped_model.state_dict(),\n", " 'optimizer_state_dict': optimizer.state_dict(),\n", " 'lr_scheduler': lr_scheduler.state_dict(),\n", " 'train_losses': losses,\n", " 'test_losses': test_losses,\n", " 'lrs': lrs,\n", " }, ckpt_path)\n", " print(f\"\\n---saved {outdir}/{tag} ckpt!---\\n\")\n", "\n", "def 
load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False): \n", " print(f\"\\n---loading {outdir}/{tag}.pth ckpt---\\n\")\n", " checkpoint = torch.load(outdir+'/last.pth', map_location='cpu')\n", " state_dict = checkpoint['model_state_dict']\n", " if multisubj_loading: # remove incompatible ridge layer that will otherwise error\n", " state_dict.pop('ridge.linears.0.weight',None)\n", " model.load_state_dict(state_dict, strict=strict)\n", " if load_epoch:\n", " globals()[\"epoch\"] = checkpoint['epoch']\n", " print(\"Epoch\",epoch)\n", " if load_optimizer:\n", " optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n", " if load_lr:\n", " lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n", " del checkpoint\n", "\n", "print(\"\\nDone with model preparations!\")\n", "num_params = utils.count_params(model)" ] }, { "cell_type": "markdown", "id": "b1e8dcc4-5ce2-4206-88dc-a68d1dd701cd", "metadata": {}, "source": [ "# Wandb" ] }, { "cell_type": "code", "execution_count": null, "id": "990cce8c-df83-473a-93c8-c47ba355eccd", "metadata": {}, "outputs": [], "source": [ "if local_rank==0 and wandb_log: # only use main process for wandb logging\n", " import wandb\n", " wandb_project = 'rtmindeye'\n", " print(f\"wandb {wandb_project} run {model_name}\")\n", " # need to configure wandb beforehand in terminal with \"wandb init\"!\n", " wandb_config = {\n", " \"model_name\": model_name,\n", " \"global_batch_size\": global_batch_size,\n", " \"batch_size\": batch_size,\n", " \"num_epochs\": num_epochs,\n", " \"num_sessions\": num_sessions,\n", " \"num_params\": num_params,\n", " \"clip_scale\": clip_scale,\n", " \"prior_scale\": prior_scale,\n", " \"blur_scale\": blur_scale,\n", " \"use_image_aug\": use_image_aug,\n", " \"max_lr\": max_lr,\n", " \"mixup_pct\": mixup_pct,\n", " \"num_samples_per_epoch\": num_samples_per_epoch,\n", " \"ckpt_interval\": ckpt_interval,\n", " \"ckpt_saving\": ckpt_saving,\n", " \"seed\": seed,\n", " \"distributed\": distributed,\n", " \"num_devices\": num_devices,\n", " \"world_size\": world_size,\n", " }\n", " print(\"wandb_config:\\n\",wandb_config)\n", " print(\"wandb_id:\",model_name)\n", " wandb.init(\n", " id=model_name,\n", " project=wandb_project,\n", " name=model_name,\n", " config=wandb_config,\n", " resume=\"allow\",\n", " )\n", "else:\n", " wandb_log = False" ] }, { "cell_type": "markdown", "id": "d5690151-2131-4918-b750-e869cbd1a8a8", "metadata": {}, "source": [ "# Train the model" ] }, { "cell_type": "code", "execution_count": null, "id": "12de6387-6e18-4e4b-b5ce-a847d625330a", "metadata": {}, "outputs": [], "source": [ "epoch = 0\n", "losses, test_losses, lrs = [], [], []\n", "best_test_loss = 1e9\n", "torch.cuda.empty_cache()" ] }, { "cell_type": "code", "execution_count": null, "id": "607a7c7b-fe5e-41a4-80bf-d2814b3a57cc", "metadata": { "tags": [] }, "outputs": [], "source": [ "# load multisubject stage1 ckpt if set\n", "if multisubject_ckpt is not None and not resume_from_ckpt:\n", " load_ckpt(\"last\",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "927350ea-b234-48e6-ae7b-2eee41ec0358", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "00ea5ae0-5c92-4276-af5b-25a17ba4dc17", "metadata": {}, "outputs": [], "source": [ "# checkpoint = torch.load(multisubject_ckpt+'/last.pth', map_location='cpu')\n", "# state_dict = 
checkpoint['model_state_dict']\n", "# model.load_state_dict(state_dict, strict=False)" ] }, { "cell_type": "code", "execution_count": null, "id": "99f09f76-4481-4133-b09a-a22b10dbc0c4", "metadata": {}, "outputs": [], "source": [ "# train_dls = [train_dl[f'subj0{s}'] for s in subj_list]\n", "\n", "model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler)\n", "# leaving out test_dl since we will only have local_rank 0 device do evals" ] }, { "cell_type": "code", "execution_count": null, "id": "60be0d5f-3e94-4612-9373-61b53d836393", "metadata": { "scrolled": true }, "outputs": [], "source": [ "print(f\"{model_name} starting with epoch {epoch} / {num_epochs}\")\n", "progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0))\n", "test_image, test_voxel = None, None\n", "mse = nn.MSELoss()\n", "l1 = nn.L1Loss()\n", "soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs))\n", "skip_train = True if epoch>=(num_epochs-1) else False # skip training if you are resuming from a fully trained model\n", "\n", "for epoch in progress_bar:\n", " model.train()\n", "\n", " fwd_percent_correct = 0.\n", " bwd_percent_correct = 0.\n", " test_fwd_percent_correct = 0.\n", " test_bwd_percent_correct = 0.\n", " \n", " recon_cossim = 0.\n", " test_recon_cossim = 0.\n", " recon_mse = 0.\n", " test_recon_mse = 0.\n", "\n", " loss_clip_total = 0.\n", " loss_blurry_total = 0.\n", " loss_blurry_cont_total = 0.\n", " test_loss_clip_total = 0.\n", " \n", " loss_prior_total = 0.\n", " test_loss_prior_total = 0.\n", "\n", " blurry_pixcorr = 0.\n", " test_blurry_pixcorr = 0. \n", "\n", " # you now have voxel_iters and image_iters with num_iterations_per_epoch batches each\n", " for train_i, behav in enumerate(train_dl): \n", " with torch.cuda.amp.autocast(dtype=data_type):\n", " optimizer.zero_grad()\n", " loss = 0.\n", " \n", " behav = behav[0]\n", "\n", " image = images[behav.long().cpu()].to(device)\n", " voxel = vox[behav.long().cpu()]\n", " # voxel = (voxel - train_mean) / train_std\n", " voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n", "\n", " if use_image_aug: \n", " image = img_augment(image)\n", "\n", " clip_target = clip_img_embedder(image)\n", " assert not torch.any(torch.isnan(clip_target))\n", "\n", " if epoch < int(mixup_pct * num_epochs):\n", " voxel, perm, betas, select = utils.mixco(voxel)\n", "\n", " voxel_ridge = model.ridge(voxel,0) #[model.ridge(voxel_list[si],si) for si,s in enumerate(subj_list)]\n", " # voxel_ridge = torch.cat(voxel_ridge_list, dim=0)\n", "\n", " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n", "\n", " if clip_scale>0:\n", " clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n", " clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1)\n", "\n", " if use_prior:\n", " loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target)\n", " loss_prior_total += loss_prior.item()\n", " loss_prior *= prior_scale\n", " loss += loss_prior\n", "\n", " recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item()\n", " recon_mse += mse(prior_out, clip_target).item()\n", "\n", " if clip_scale>0:\n", " if epoch < int(mixup_pct * num_epochs): \n", " loss_clip = utils.mixco_nce(\n", " clip_voxels_norm,\n", " clip_target_norm,\n", " temp=.006,\n", " perm=perm, betas=betas, select=select)\n", " else:\n", " epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)]\n", " 
loss_clip = utils.soft_clip_loss(\n", " clip_voxels_norm,\n", " clip_target_norm,\n", " temp=epoch_temp)\n", "\n", " loss_clip_total += loss_clip.item()\n", " loss_clip *= clip_scale\n", " loss += loss_clip\n", "\n", " if blurry_recon: \n", " image_enc_pred, transformer_feats = blurry_image_enc_\n", "\n", " image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215\n", " loss_blurry = l1(image_enc_pred, image_enc)\n", " loss_blurry_total += loss_blurry.item()\n", "\n", " if epoch < int(mixup_pct * num_epochs):\n", " image_enc_shuf = image_enc[perm]\n", " betas_shape = [-1] + [1]*(len(image_enc.shape)-1)\n", " image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \\\n", " image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)\n", "\n", " image_norm = (image - mean)/std\n", " image_aug = (blur_augs(image) - mean)/std\n", " _, cnx_embeds = cnx(image_norm)\n", " _, cnx_aug_embeds = cnx(image_aug)\n", "\n", " cont_loss = utils.soft_cont_loss(\n", " nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),\n", " nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),\n", " nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),\n", " temp=0.2)\n", " loss_blurry_cont_total += cont_loss.item()\n", "\n", " loss += (loss_blurry + 0.1*cont_loss) * blur_scale #/.18215\n", "\n", " if clip_scale>0:\n", " # forward and backward top 1 accuracy \n", " labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) \n", " fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()\n", " bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()\n", "\n", " if blurry_recon:\n", " with torch.no_grad():\n", " # only doing pixcorr eval on a subset of the samples per batch because its costly & slow to compute autoenc.decode()\n", " random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)\n", " blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1)\n", " pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)\n", " blurry_pixcorr += pixcorr.item()\n", " \n", " utils.check_loss(loss)\n", " accelerator.backward(loss)\n", " optimizer.step()\n", "\n", " losses.append(loss.item())\n", " lrs.append(optimizer.param_groups[0]['lr'])\n", "\n", " if lr_scheduler_type is not None:\n", " lr_scheduler.step()\n", " \n", " if train_i >= num_iterations_per_epoch-1:\n", " break\n", " \n", " model.eval()\n", " if local_rank==0:\n", " with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): \n", " for test_i, behav in enumerate(test_dl): \n", " behav = behav[0]\n", "\n", " loss=0.\n", "\n", " if behav.ndim>1:\n", " image = images[behav[:,0].long().cpu()].to(device)\n", " voxel = vox[behav.long().cpu()].mean(1)\n", " else:\n", " image = images[behav.long().cpu()].to(device)\n", " voxel = vox[behav.long().cpu()]\n", " \n", " voxel = torch.Tensor(voxel).unsqueeze(1).to(device)\n", "\n", " clip_img_embedder = clip_img_embedder.to(device)\n", " clip_target = clip_img_embedder(image.float())\n", " \n", " voxel_ridge = model.ridge(voxel,0)\n", "\n", " backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge)\n", "\n", " if clip_scale>0:\n", " clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1)\n", " clip_target_norm = 
nn.functional.normalize(clip_target.flatten(1), dim=-1)\n", " \n", " # for some evals, only doing a subset of the samples per batch because of computational cost\n", " random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False)\n", " \n", " if use_prior:\n", " loss_prior, contaminated_prior_out = model.diffusion_prior(text_embed=backbone[random_samps], image_embed=clip_target[random_samps])\n", " test_loss_prior_total += loss_prior.item()\n", " loss_prior *= prior_scale\n", " loss += loss_prior\n", " \n", " if clip_scale>0:\n", " loss_clip = utils.soft_clip_loss(\n", " clip_voxels_norm,\n", " clip_target_norm,\n", " temp=.006)\n", "\n", " test_loss_clip_total += loss_clip.item()\n", " loss_clip = loss_clip * clip_scale\n", " loss += loss_clip\n", "\n", " if blurry_recon:\n", " image_enc_pred, _ = blurry_image_enc_\n", " blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample / 2 + 0.5).clamp(0,1)\n", " pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images)\n", " test_blurry_pixcorr += pixcorr.item()\n", "\n", " if clip_scale>0:\n", " # forward and backward top 1 accuracy \n", " labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) \n", " test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item()\n", " test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item()\n", " \n", " utils.check_loss(loss) \n", " test_losses.append(loss.item())\n", "\n", " # if utils.is_interactive(): clear_output(wait=True)\n", " if skip_train: break\n", " print(\"---\")\n", "\n", " # assert (test_i+1) == 1\n", " logs = {\"train/loss\": np.mean(losses[-(train_i+1):]),\n", " \"test/loss\": np.mean(test_losses[-(test_i+1):]),\n", " \"train/lr\": lrs[-1],\n", " \"train/num_steps\": len(losses),\n", " \"test/num_steps\": len(test_losses),\n", " \"train/fwd_pct_correct\": fwd_percent_correct / (train_i + 1),\n", " \"train/bwd_pct_correct\": bwd_percent_correct / (train_i + 1),\n", " \"test/test_fwd_pct_correct\": test_fwd_percent_correct / (test_i + 1),\n", " \"test/test_bwd_pct_correct\": test_bwd_percent_correct / (test_i + 1),\n", " \"train/loss_clip_total\": loss_clip_total / (train_i + 1),\n", " \"train/loss_blurry_total\": loss_blurry_total / (train_i + 1),\n", " \"train/loss_blurry_cont_total\": loss_blurry_cont_total / (train_i + 1),\n", " \"test/loss_clip_total\": test_loss_clip_total / (test_i + 1),\n", " \"train/blurry_pixcorr\": blurry_pixcorr / (train_i + 1),\n", " \"test/blurry_pixcorr\": test_blurry_pixcorr / (test_i + 1),\n", " \"train/recon_cossim\": recon_cossim / (train_i + 1),\n", " \"test/recon_cossim\": test_recon_cossim / (test_i + 1),\n", " \"train/recon_mse\": recon_mse / (train_i + 1),\n", " \"test/recon_mse\": test_recon_mse / (test_i + 1),\n", " \"train/loss_prior\": loss_prior_total / (train_i + 1),\n", " \"test/loss_prior\": test_loss_prior_total / (test_i + 1),\n", " }\n", "\n", " # if finished training, save jpg recons if they exist\n", " if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0):\n", " if blurry_recon: \n", " image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215\n", " # transform blurry recon latents to images and plot it\n", " fig, axes = plt.subplots(1, 8, figsize=(10, 4))\n", " jj=-1\n", " for j in [0,1,2,3]:\n", " jj+=1\n", " axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 
0.5).clamp(0,1)))\n", "                        axes[jj].axis('off')\n", "                        jj+=1\n", "                        axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1)))\n", "                        axes[jj].axis('off')\n", "                    plt.show()\n", "\n", "        progress_bar.set_postfix(**logs)\n", "\n", "        if wandb_log: wandb.log(logs)\n", "            \n", "    # Save model checkpoint and reconstruct\n", "    if (ckpt_saving) and (epoch % ckpt_interval == 0):\n", "        save_ckpt('last')\n", "\n", "    # wait for other GPUs to catch up if needed\n", "    accelerator.wait_for_everyone()\n", "    torch.cuda.empty_cache()\n", "\n", "print(\"\\n===Finished!===\\n\")\n", "if ckpt_saving:\n", "    save_ckpt('last')" ] }, { "cell_type": "code", "execution_count": null, "id": "b0af03cb-58c3-4e3e-9e2b-a3485635864b", "metadata": {}, "outputs": [], "source": [ "blurry_recon" ] }, { "cell_type": "code", "execution_count": null, "id": "5702acf6-45fe-44f5-8842-c0e2d4d8e8ce", "metadata": {}, "outputs": [], "source": [ "# # Track metrics here:\n", "# https://docs.google.com/spreadsheets/d/1-dbmr4ovl2-4-MFNAL1DqLS651KM_ihjDkkUeP1kHXs/edit?gid=1494588999#gid=1494588999" ] }, { "cell_type": "markdown", "id": "23a54acc-1dce-4de4-9d5f-d0582f5097c5", "metadata": {}, "source": [ "**To check whether the model is learning, look at test/test_fwd_pct_correct and test/test_bwd_pct_correct: top-1 retrieval accuracy on the test set should rise clearly above chance (1/batch_size).**" ] } ], "metadata": { "kernelspec": { "display_name": "rt_mindEye2 [~/.conda/envs/rt_mindEye2/]", "language": "python", "name": "conda_rt_mindeye2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.7" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": { "height": "calc(100% - 180px)", "left": "10px", "top": "150px", "width": "165px" }, "toc_section_display": true, "toc_window_display": true }, "toc-autonumbering": true, "vscode": { "interpreter": { "hash": "62aae01ef0cf7b6af841ab1c8ce59175c4332e693ab3d00bc32ceffb78a35376" } } }, "nbformat": 4, "nbformat_minor": 5 }