{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "6b18f6a3-cc4f-437e-9756-c99fc6a5fad4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "importing modules\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Detected kernel version 4.18.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "LOCAL RANK 0\n", "device: cuda\n" ] } ], "source": [ "print('importing modules')\n", "import os\n", "import sys\n", "import json\n", "import argparse\n", "import numpy as np\n", "import math\n", "from einops import rearrange\n", "import time\n", "import random\n", "import string\n", "import h5py\n", "from tqdm import tqdm\n", "\n", "import matplotlib.pyplot as plt\n", "import torch\n", "import torch.nn as nn\n", "from torchvision import transforms\n", "from accelerate import Accelerator, DeepSpeedPlugin\n", "\n", "# SDXL unCLIP requires code from https://github.com/Stability-AI/generative-models/tree/main\n", "sys.path.append('generative_models/')\n", "import sgm\n", "from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder, FrozenCLIPEmbedder, FrozenOpenCLIPEmbedder2\n", "from generative_models.sgm.models.diffusion import DiffusionEngine\n", "from generative_models.sgm.util import append_dims\n", "from omegaconf import OmegaConf\n", "\n", "# tf32 data type is faster than standard float32\n", "torch.backends.cuda.matmul.allow_tf32 = True\n", "\n", "# custom functions #\n", "import utils\n", "from models import *\n", "\n", "### Multi-GPU config ###\n", "local_rank = os.getenv('RANK')\n", "if local_rank is None: \n", " local_rank = 0\n", "else:\n", " local_rank = int(local_rank)\n", "print(\"LOCAL RANK \", local_rank) \n", "\n", "accelerator = Accelerator(split_batches=False, mixed_precision=\"fp16\")\n", "device = accelerator.device\n", "print(\"device:\",device)" ] }, { "cell_type": "code", "execution_count": 6, "id": "20cdb696-1d6e-4b73-951b-b0cd1dda219a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "model_name: sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0\n", "--model_name=sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0 --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0/all_recons.pt\n", "The autoreload extension is already loaded. 
To reload it, use:\n", " %reload_ext autoreload\n" ] } ], "source": [ "# if running this interactively, can specify jupyter_args here for argparser to use\n", "if utils.is_interactive():\n", " model_name = f\"sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0\" #\"sub-001_bs24_MST\"\n", " print(\"model_name:\", model_name)\n", " eval_dir = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\" # also defined in the next cell; needed for the paths below\n", " if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n", " all_recons_path = f\"{eval_dir}/all_recons.pt\"\n", " elif \"paul\" in model_name:\n", " all_recons_path = f\"evals/{model_name}/{model_name}_all_recons.pt\"\n", " else:\n", " all_recons_path = f\"{eval_dir}/{model_name}_all_recons.pt\" \n", "\n", " # other variables can be specified in the following string:\n", " jupyter_args = f\"--model_name={model_name} --all_recons_path={all_recons_path}\"\n", " print(jupyter_args)\n", " jupyter_args = jupyter_args.split()\n", " \n", " from IPython.display import clear_output # function to clear print outputs in cell\n", " %load_ext autoreload \n", " # this allows you to change functions in models.py or utils.py and have this notebook automatically update with your revisions\n", " %autoreload 2 " ] }, { "cell_type": "code", "execution_count": 8, "id": "4b31d7c0-f5bd-4a19-a8be-7a3a165d79b6", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/ri4541/.conda/envs/rt_mindEye2/lib/python3.11/site-packages/torchvision/transforms/functional.py:1603: UserWarning: The default value of the antialias parameter of all the resizing transforms (Resize(), RandomResizedCrop(), etc.) will change from None to True in v0.17, in order to be consistent across the PIL and Tensor backends. To suppress this warning, directly pass antialias=True (recommended, future default), antialias=None (current default, which means False for Tensors and True for PIL), or antialias=False (only works on Tensors - PIL will still use antialiasing). 
This also applies if you are using the inference transforms from the models weights: update the call to weights.transforms(antialias=True).\n", " warnings.warn(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "sub-001_multi_bs24_MST_rishab_MSTsplit_remove_150_random_seed_0\n", "torch.Size([300, 3, 768, 768]) torch.Size([300, 256, 1664])\n" ] } ], "source": [ "parser = argparse.ArgumentParser(description=\"Model Training Configuration\")\n", "parser.add_argument(\n", " \"--model_name\", type=str, default=\"testing\",\n", " help=\"will load ckpt for model found in ../train_logs/model_name\",\n", ")\n", "parser.add_argument(\n", " \"--all_recons_path\", type=str,\n", " help=\"Path to where all_recons.pt is stored\",\n", ")\n", "parser.add_argument(\n", " \"--seed\",type=int,default=42,\n", ")\n", "if utils.is_interactive():\n", " args = parser.parse_args(jupyter_args)\n", "else:\n", " args = parser.parse_args()\n", "\n", "# create global variables without the args prefix\n", "for attribute_name in vars(args).keys():\n", " globals()[attribute_name] = getattr(args, attribute_name)\n", " \n", "# seed all random functions\n", "utils.seed_everything(seed)\n", "\n", "# make output directory\n", "eval_dir = f\"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/{model_name}\"\n", "if not os.path.exists(eval_dir):\n", " os.makedirs(eval_dir, exist_ok=True)\n", "\n", "# all_images = torch.load(f\"{eval_dir}/all_images.pt\")\n", "# all_recons = torch.load(f\"{eval_dir}/all_recons.pt\")\n", "# all_clipvoxels = torch.load(f\"{eval_dir}/all_clipvoxels.pt\")\n", "# all_predcaptions = torch.load(f\"{eval_dir}/all_predcaptions.pt\")\n", "if (\"remove\" in model_name and \"random\" in model_name) or \"ses-04\" in model_name:\n", " all_images = torch.load(f\"{eval_dir}/all_images.pt\")\n", " all_clipvoxels = torch.load(f\"{eval_dir}/all_clipvoxels.pt\")\n", " all_predcaptions = torch.load(f\"{eval_dir}/all_predcaptions.pt\")\n", " all_unrefinedrecons = torch.load(f\"{eval_dir}/all_recons.pt\")\n", "elif \"ses-01\" in model_name and \"paul\" in model_name:\n", " all_images = torch.load(f\"evals/{model_name}/{model_name}_all_images.pt\")\n", " all_clipvoxels = torch.load(f\"evals/{model_name}/{model_name}_all_clipvoxels.pt\")\n", " all_predcaptions = torch.load(f\"evals/{model_name}/{model_name}_all_predcaptions.pt\")\n", " all_unrefinedrecons = torch.load(f\"evals/{model_name}/{model_name}_all_recons.pt\")\n", "else:\n", " all_images = torch.load(f\"{eval_dir}/{model_name}_all_images.pt\") \n", " all_clipvoxels = torch.load(f\"{eval_dir}/{model_name}_all_clipvoxels.pt\") \n", " all_predcaptions = torch.load(f\"{eval_dir}/{model_name}_all_predcaptions.pt\") \n", " all_unrefinedrecons = torch.load(f\"{eval_dir}/{model_name}_all_recons.pt\") \n", "\n", "all_recons = torch.load(all_recons_path)\n", "all_recons = transforms.Resize((768,768))(all_recons).float()\n", "\n", "print(model_name)\n", "print(all_recons.shape, all_clipvoxels.shape)" ] }, { "cell_type": "code", "execution_count": 5, "id": "24bdd667-0862-4561-b432-9fa7543df863", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. 
Setting context_dim to [2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 10. Setting context_dim to [2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n", "SpatialTransformer: Found context dims [2048] of depth 1, which does not match the specified 'depth' of 2. Setting context_dim to [2048, 2048] now.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Initialized embedder #0: FrozenCLIPEmbedder with 123060480 params. Trainable: False\n", "Initialized embedder #1: FrozenOpenCLIPEmbedder2 with 694659841 params. Trainable: False\n", "Initialized embedder #2: ConcatTimestepEmbedderND with 0 params. Trainable: False\n", "Initialized embedder #3: ConcatTimestepEmbedderND with 0 params. Trainable: False\n", "Initialized embedder #4: ConcatTimestepEmbedderND with 0 params. 
Trainable: False\n", "Restored from /scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/zavychromaxl_v30.safetensors with 1 missing and 1 unexpected keys\n", "Missing Keys: ['denoiser.sigmas']\n", "Unexpected Keys: ['conditioner.embedders.0.transformer.text_model.embeddings.position_ids']\n", "crossattn torch.Size([1, 77, 2048])\n", "vector_suffix torch.Size([1, 1536])\n", "---\n", "crossattn_uc torch.Size([1, 77, 2048])\n", "vector_uc torch.Size([1, 2816])\n" ] } ], "source": [ "config = OmegaConf.load(\"generative_models/configs/unclip6.yaml\")\n", "config = OmegaConf.to_container(config, resolve=True)\n", "unclip_params = config[\"model\"][\"params\"]\n", "sampler_config = unclip_params[\"sampler_config\"]\n", "sampler_config['params']['num_steps'] = 38\n", "config = OmegaConf.load(\"generative_models/configs/inference/sd_xl_base.yaml\")\n", "config = OmegaConf.to_container(config, resolve=True)\n", "refiner_params = config[\"model\"][\"params\"]\n", "\n", "network_config = refiner_params[\"network_config\"]\n", "denoiser_config = refiner_params[\"denoiser_config\"]\n", "first_stage_config = refiner_params[\"first_stage_config\"]\n", "conditioner_config = refiner_params[\"conditioner_config\"]\n", "scale_factor = refiner_params[\"scale_factor\"]\n", "disable_first_stage_autocast = refiner_params[\"disable_first_stage_autocast\"]\n", "\n", "# base_ckpt_path = '/weka/robin/projects/stable-research/checkpoints/sd_xl_base_1.0.safetensors'\n", "# base_ckpt_path = '/weka/proj-fmri/paulscotti/stable-research/zavychromaxl_v30.safetensors'\n", "\n", "# if running on Della compute node, won't be able to find openai/clip-vit-large-patch14 (or any other internet-accessed file like from huggingface) so always download \"locally\" (onto Della)\n", "base_ckpt_path = '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/zavychromaxl_v30.safetensors'\n", "base_engine = DiffusionEngine(network_config=network_config,\n", " denoiser_config=denoiser_config,\n", " first_stage_config=first_stage_config,\n", " conditioner_config=conditioner_config,\n", " sampler_config=sampler_config, # using the one defined by the unclip\n", " scale_factor=scale_factor,\n", " disable_first_stage_autocast=disable_first_stage_autocast,\n", " ckpt_path=base_ckpt_path)\n", "base_engine.eval().requires_grad_(False)\n", "base_engine.to(device)\n", "\n", "base_text_embedder1 = FrozenCLIPEmbedder(\n", " layer=conditioner_config['params']['emb_models'][0]['params']['layer'],\n", " layer_idx=conditioner_config['params']['emb_models'][0]['params']['layer_idx'],\n", ")\n", "base_text_embedder1.to(device)\n", "\n", "base_text_embedder2 = FrozenOpenCLIPEmbedder2(\n", " arch=conditioner_config['params']['emb_models'][1]['params']['arch'],\n", " version=conditioner_config['params']['emb_models'][1]['params']['version'],\n", " freeze=conditioner_config['params']['emb_models'][1]['params']['freeze'],\n", " layer=conditioner_config['params']['emb_models'][1]['params']['layer'],\n", " always_return_pooled=conditioner_config['params']['emb_models'][1]['params']['always_return_pooled'],\n", " legacy=conditioner_config['params']['emb_models'][1]['params']['legacy'],\n", ")\n", "base_text_embedder2.to(device)\n", "\n", "batch={\"txt\": \"\",\n", " \"original_size_as_tuple\": torch.ones(1, 2).to(device) * 768,\n", " \"crop_coords_top_left\": torch.zeros(1, 2).to(device),\n", " \"target_size_as_tuple\": torch.ones(1, 2).to(device) * 1024}\n", "out = base_engine.conditioner(batch)\n", "crossattn = out[\"crossattn\"].to(device)\n", "vector_suffix = 
out[\"vector\"][:,-1536:].to(device)\n", "print(\"crossattn\", crossattn.shape)\n", "print(\"vector_suffix\", vector_suffix.shape)\n", "print(\"---\")\n", "\n", "batch_uc={\"txt\": \"painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime\",\n", " \"original_size_as_tuple\": torch.ones(1, 2).to(device) * 768,\n", " \"crop_coords_top_left\": torch.zeros(1, 2).to(device),\n", " \"target_size_as_tuple\": torch.ones(1, 2).to(device) * 1024}\n", "out = base_engine.conditioner(batch_uc)\n", "crossattn_uc = out[\"crossattn\"].to(device)\n", "vector_uc = out[\"vector\"].to(device)\n", "print(\"crossattn_uc\", crossattn_uc.shape)\n", "print(\"vector_uc\", vector_uc.shape)" ] }, { "cell_type": "code", "execution_count": 6, "id": "07f437d1-9b8e-4b13-85ad-d45062a5ce09", "metadata": { "tags": [] }, "outputs": [], "source": [ "num_samples = 1 # PS: I tried increasing this to 16 and picking highest cosine similarity, it didnt seem to increase eval performance!\n", "img2img_timepoint = 15 # 13 # higher number means more reliance on prompt, less reliance on matching the conditioning image\n", "base_engine.sampler.guider.scale = 5 # cfg\n", "def denoiser(x, sigma, c): return base_engine.denoiser(base_engine.model, x, sigma, c)" ] }, { "cell_type": "code", "execution_count": 7, "id": "939e1cbb-5836-48c2-87d8-3e493e950011", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/300 [00:00
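# --- Hedged sketch (not code from this notebook): how per-image SDXL conditioning could be
# --- assembled inside the refinement loop that the 0/300 progress bar above iterates over.
# --- It reuses objects defined in earlier cells (base_text_embedder1/2, vector_suffix,
# --- crossattn_uc, vector_uc, all_predcaptions, device). The loop index `i`, the use of the
# --- predicted caption as the prompt, and the concatenation order are assumptions that are
# --- merely consistent with the printed shapes (crossattn 2048 = 768 + 1280, vector 2816 =
# --- 1280 + 1536); this is not a claim about the authors' exact implementation.
i = 0
prompt = str(all_predcaptions[i])
with torch.no_grad():
    emb1 = base_text_embedder1(prompt)          # (1, 77, 768) CLIP ViT-L token embeddings
    emb2, pooled = base_text_embedder2(prompt)  # (1, 77, 1280) tokens and (1, 1280) pooled, OpenCLIP bigG
crossattn_i = torch.cat([emb1, emb2], dim=-1).to(device)          # (1, 77, 2048) cross-attention context
vector_i = torch.cat([pooled, vector_suffix], dim=-1).to(device)  # (1, 2816) pooled text + size/crop embeds
c = {"crossattn": crossattn_i, "vector": vector_i}
uc = {"crossattn": crossattn_uc, "vector": vector_uc}
# The img2img refinement itself would then (roughly, and again as an assumption) encode the
# unrefined recon with base_engine.encode_first_stage, add noise at the sigma implied by
# img2img_timepoint, and run base_engine.sampler(denoiser, noised_latent, cond=c, uc=uc)
# before decoding with base_engine.decode_first_stage.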