|
print("importing modules") |
|
import os |
|
import sys |
|
import json |
|
import argparse |
|
import numpy as np |
|
import time |
|
import random |
|
import string |
|
import h5py |
|
from tqdm import tqdm |
|
import webdataset as wds |
|
from PIL import Image |
|
import pandas as pd |
|
import nibabel as nib |
|
import nilearn |
|
|
|
import matplotlib.pyplot as plt |
|
import torch |
|
import torch.nn as nn |
|
from torchvision import transforms |
|
|
|
|
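# Allow TF32 matmuls on Ampere+ GPUs: faster, with negligible precision loss here.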
|
torch.backends.cuda.matmul.allow_tf32 = True |
|
|
|
import utils |
|
from utils import load_preprocess_betas, resample, applyxfm, apply_thresh, resample_betas |
|
|
|
|
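# Load the lab's preprocessing utilities from an absolute path under the name
# `preproc`, so they don't shadow the local `utils` module imported above.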
|
import importlib.util |
|
parent_utils_path = "/home/ri4541/mindeye_preproc/analysis/utils.py" |
|
spec = importlib.util.spec_from_file_location("utils", parent_utils_path) |
|
preproc = importlib.util.module_from_spec(spec) |
|
parent_dir = os.path.dirname(parent_utils_path) |
|
if parent_dir not in sys.path: |
|
sys.path.append(parent_dir) |
|
spec.loader.exec_module(preproc) |
|
|
|
if utils.is_interactive():
    from IPython.display import clear_output
    get_ipython().run_line_magic('load_ext', 'autoreload')
    get_ipython().run_line_magic('autoreload', '2')
|
|
|
seed = utils.get_slurm_seed() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if utils.is_interactive(): |
|
sub = "sub-005" |
|
session = "all" |
|
task = 'C' |
|
func_task_name = 'C' |
|
else: |
|
sub = os.environ["sub"] |
|
session = os.environ["session"] |
|
task = os.environ["task"] |
|
func_task_name = 'C' |
|
|
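# "all" expands to every scan session collected for this subject/task.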
|
if session == "all": |
|
ses_list = ["ses-01", "ses-02", "ses-03"] |
|
design_ses_list = ["ses-01", "ses-02", "ses-03"] |
|
else: |
|
ses_list = [session] |
|
design_ses_list = [session] |
|
|
|
task_name = f"_task-{task}" if task != 'study' else '' |
|
resample_voxel_size = False |
|
resample_post_glmsingle = False |
|
load_from_resampled_file = False |
|
|
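# Train/test split modes: 'MST' holds out the MST pairs for testing, 'orig'
# holds out repeated images, and 'unique' trains on repeats and tests on
# singly-presented images. The two removal flags below are mutually exclusive.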
|
train_test_split = 'MST' |
|
remove_close_to_MST = False |
|
remove_random_n = False |
|
|
|
if remove_close_to_MST or remove_random_n: |
|
assert remove_close_to_MST != remove_random_n |
|
|
|
n_to_remove = 0 |
|
if remove_random_n: |
|
assert train_test_split == 'MST' |
|
n_to_remove = 150 |
|
|
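# Build a filename suffix encoding the resampling parameters,
# e.g. "resampled_2_5mm_sinc_preglmsingle".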
|
if resample_voxel_size: |
|
|
|
resampled_vox_size = 2.5 |
|
resample_method = "sinc" |
|
|
|
|
|
vox_dim_str = str(resampled_vox_size).replace('.', '_') |
|
resampled_suffix = f"resampled_{vox_dim_str}mm_{resample_method}" |
|
mask_resampled_suffix = resampled_suffix |
|
if resample_post_glmsingle: |
|
resampled_suffix += '_postglmsingle' |
|
else: |
|
resampled_suffix += '_preglmsingle' |
|
|
|
|
|
|
|
|
|
|
|
session_label = preproc.get_session_label(ses_list) |
|
print('session label:', session_label) |
|
n_runs, _ = preproc.get_runs_per_session(sub, session, ses_list) |
|
|
|
|
|
|
|
|
|
|
|
if utils.is_interactive(): |
|
glmsingle_path = f"/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_{sub}_{session_label}_task-{task}" |
|
else: |
|
glmsingle_path = os.environ["glmsingle_path"] |
|
|
|
designdir = "/home/ri4541/real_time_mindEye2" |
|
print(glmsingle_path) |
|
|
|
if resample_voxel_size: |
|
|
|
|
|
if resample_post_glmsingle: |
|
|
|
orig_glmsingle_path = glmsingle_path |
|
glmsingle_path += f"_{resampled_suffix}" |
|
print("resampled glmsingle path:", glmsingle_path) |
|
if load_from_resampled_file: |
|
|
|
assert os.path.exists(glmsingle_path) |
|
else: |
|
|
|
os.makedirs(glmsingle_path,exist_ok=True) |
|
else: |
|
|
|
glmsingle_path += f"_{resampled_suffix}" |
|
print("glmsingle path:", glmsingle_path) |
|
|
|
assert os.path.exists(glmsingle_path) |
|
print("glmsingle path exists!") |
|
|
|
|
|
|
|
|
|
|
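# Load the experimental design across sessions: trial-wise image names, trial
# onsets, and new-run flags.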
|
data, starts, images, is_new_run, image_names, unique_images, len_unique_images = preproc.load_design_files( |
|
sub=sub, |
|
session=session, |
|
func_task_name=task, |
|
designdir=designdir, |
|
design_ses_list=design_ses_list |
|
) |
|
|
|
if sub == 'sub-001': |
|
if session == 'ses-01': |
|
assert image_names[0] == 'images/image_686_seed_1.png' |
|
elif session in ('ses-02', 'all'): |
|
assert image_names[0] == 'all_stimuli/special515/special_40840.jpg' |
|
elif session == 'ses-03': |
|
assert image_names[0] == 'all_stimuli/special515/special_69839.jpg' |
|
elif session == 'ses-04': |
|
assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png' |
|
elif sub == 'sub-003': |
|
assert image_names[0] == 'all_stimuli/rtmindeye_stimuli/image_686_seed_1.png' |
|
|
|
unique_images = np.unique(image_names.astype(str)) |
|
unique_images = unique_images[(unique_images!="nan")] |
|
len_unique_images = len(unique_images) |
|
print("n_runs",n_runs) |
|
|
|
if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): |
|
assert len(unique_images) == 851 |
|
|
|
print(image_names[:4]) |
|
print(starts[:4]) |
|
print(is_new_run[:4]) |
|
|
|
if remove_random_n: |
|
|
|
|
|
|
|
|
|
|
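    # Randomly choose images to drop: 99 singly-presented non-MST images plus
    # 17 repeated special515 images (with their repeats this is assumed to
    # total n_to_remove = 150 removed trials).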
|
np.random.seed(seed) |
|
options_to_remove = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' not in x and list(image_names).count(x)==1] |
|
options_to_remove_special515 = [x for x in set(image_names) if str(x) != 'nan' and x != 'blank.jpg' and 'MST_pairs' not in x and 'special515' in x and list(image_names).count(x)>1] |
|
imgs_to_remove = np.random.choice(options_to_remove, size=99, replace=False) |
|
imgs_to_remove = np.append(imgs_to_remove, np.random.choice(options_to_remove_special515, size=17, replace=False)) |
|
|
|
# First pass over the design: keep valid (non-blank, non-missing) trials, map
# each trial to its index in unique_images, and collect the MST images.
import re

image_idx = np.array([])
vox_image_names = np.array([])
all_MST_images = dict()
for i, im in enumerate(image_names):
    # skip blank and missing trials
    if im == "blank.jpg" or str(im) == "nan":
        continue
    vox_image_names = np.append(vox_image_names, im)
    if remove_close_to_MST and "closest_pairs" in im:
        continue
    if remove_random_n and im in imgs_to_remove:
        continue

    image_idx_ = np.where(im == unique_images)[0].item()
    image_idx = np.append(image_idx, image_idx_)

    # MST image naming conventions differ across subjects/sessions
    if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
        if ('w_' in im or 'paired_image_' in im
                or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', im)
                or re.match(r'images/\d{1,2}_\d{1,3}\.png$', im)):
            all_MST_images[i] = im
    elif 'MST' in im:
        all_MST_images[i] = im

image_idx = torch.Tensor(image_idx).long()
|
|
|
|
|
|
|
|
|
unique_MST_images = np.unique(list(all_MST_images.values()))

# Second pass: give every kept trial an MST ID (its index into
# unique_MST_images, or len(unique_MST_images) as a "not MST" sentinel),
# and record the indices of any removed trials.
MST_ID = np.array([], dtype=int)
if remove_close_to_MST:
    close_to_MST_idx = np.array([], dtype=int)
if remove_random_n:
    random_n_idx = np.array([], dtype=int)

vox_idx = np.array([], dtype=int)
j = 0  # counts valid trials, i.e., rows of the beta matrix
for i, im in enumerate(image_names):
    if im == "blank.jpg" or str(im) == "nan":
        continue
    if remove_close_to_MST and "closest_pairs" in im:
        close_to_MST_idx = np.append(close_to_MST_idx, i)
        continue
    if remove_random_n and im in imgs_to_remove:
        vox_idx = np.append(vox_idx, j)
        j += 1
        continue
    j += 1
    curr = np.where(im == unique_MST_images)
    if curr[0].size == 0:
        # not an MST image: use len(unique_MST_images) as the sentinel ID
        MST_ID = np.append(MST_ID, np.array(len(unique_MST_images)))
    else:
        MST_ID = np.append(MST_ID, curr)

assert len(MST_ID) == len(image_idx)
|
|
|
|
|
print(MST_ID.shape) |
|
if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'): |
|
assert len(all_MST_images) == 100 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load every presented image, resize to 224x224, and flag MST images.
import imageio.v2 as imageio

resize_transform = transforms.Resize((224, 224))
MST_images = []
images = None
for im_idx in tqdm(image_idx):
    if sub == 'sub-001' and session == 'ses-01':
        image_file = f"all_stimuli/rtmindeye_stimuli/{unique_images[im_idx]}"
    else:
        image_file = f"{unique_images[im_idx]}"
    im = imageio.imread(image_file)
    im = torch.Tensor(im / 255).permute(2, 0, 1)  # HWC in [0,255] -> CHW in [0,1]
    im = resize_transform(im.unsqueeze(0))
    images = im if images is None else torch.vstack((images, im))
    if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
        MST_images.append(bool(
            'w_' in image_file or 'paired_image_' in image_file
            or re.match(r'all_stimuli/rtmindeye_stimuli/\d{1,2}_\d{1,3}\.png$', image_file)
            or re.match(r'all_stimuli/rtmindeye_stimuli/images/\d{1,2}_\d{1,3}\.png$', image_file)))
    else:
        MST_images.append("MST_pairs" in image_file)

print("images", images.shape)
MST_images = np.array(MST_images)
print("MST_images", len(MST_images))
if (sub == 'sub-001' and session == 'ses-04') or (sub == 'sub-003' and session == 'ses-01'):
    assert len(MST_images[MST_images == True]) == 100
print("MST_images==True", len(MST_images[MST_images == True]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
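# Pairmates are assumed adjacent after np.unique's sort (mates share a filename
# prefix), so reshaping (2k,) -> (k, 2) puts each pair on one row.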
|
assert unique_MST_images.shape[0] % 2 == 0 |
|
MST_pairmate_names = unique_MST_images.reshape(int(unique_MST_images.shape[0]/2),2) |
|
|
|
|
|
MST_pairmate_indices = np.empty(shape=MST_pairmate_names.shape, dtype=int) |
|
for p, pair in enumerate(MST_pairmate_names): |
|
for i, im in enumerate(pair): |
|
MST_pairmate_indices[p][i] = np.where(np.isin(list(all_MST_images.values()), im))[0][0] |
|
|
|
print(MST_pairmate_indices.shape, MST_pairmate_indices) |
|
|
|
|
|
|
|
|
|
|
|
if (sub == 'sub-001' and session in ('ses-02', 'ses-03', 'all')): |
|
|
|
|
|
MST_pairs = utils.find_paired_indices(torch.tensor(MST_ID)) |
|
MST_pairs = np.array(sorted(MST_pairs[:-1], key=lambda x: x[0])) |
|
|
|
|
|
|
|
fig, ax = plt.subplots(1, 3, figsize=(10,4)) |
|
fig.suptitle('Sample MST pairs') |
|
|
|
ax[0].imshow(images[MST_pairs[-1][0]].permute(1,2,0).numpy()) |
|
ax[0].set_title(f"Trial 0") |
|
|
|
ax[1].imshow(images[MST_pairs[-1][1]].permute(1,2,0).numpy()) |
|
ax[1].set_title(f"Trial 1") |
|
|
|
ax[2].imshow(images[MST_pairs[-1][2]].permute(1,2,0).numpy()) |
|
ax[2].set_title(f"Trial 2") |
|
|
|
plt.setp(ax, xticks=[], yticks=[]) |
|
plt.tight_layout() |
|
plt.show() |
|
|
|
|
|
|
|
|
|
|
|
|
|
pairs = utils.find_paired_indices(image_idx) |
|
pairs = sorted(pairs, key=lambda x: x[0]) |
|
|
|
fig, axes = plt.subplots(1, 3, figsize=(6, 2)) |
|
for i, ax in enumerate(axes): |
|
ax.imshow(images[i].permute(1, 2, 0).numpy()) |
|
ax.set_title(f"Trial {i}") |
|
ax.axis("off") |
|
|
|
plt.tight_layout() |
|
|
|
|
|
plt.show() |
|
|
|
|
|
|
|
|
|
|
|
p=0 |
|
|
|
|
|
fig, ax = plt.subplots(1, 2, figsize=(10,8)) |
|
|
|
ax[0].imshow(images[pairs[p][0]].permute(1,2,0).numpy()) |
|
ax[0].set_title(f"Repeat 1") |
|
|
|
ax[1].imshow(images[pairs[p][1]].permute(1,2,0).numpy()) |
|
ax[1].set_title(f"Repeat 2") |
|
|
|
plt.setp(ax, xticks=[], yticks=[]) |
|
plt.tight_layout() |
|
plt.show() |
|
|
|
|
|
|
|
|
|
|
|
def get_image_pairs(sub, session, func_task_name, designdir): |
|
"""Loads design files and processes image pairs for a given session.""" |
|
_, _, _, _, image_names, unique_images, _ = preproc.load_design_files( |
|
sub=sub, |
|
session=session, |
|
func_task_name=func_task_name, |
|
designdir=designdir, |
|
design_ses_list=[session] |
|
) |
|
return utils.process_images(image_names, unique_images) |
|
|
|
|
|
|
|
|
|
|
|
from collections import defaultdict |
|
|
|
all_dicts = [] |
|
for s_idx, s in enumerate(ses_list): |
|
im, vo, _ = get_image_pairs(sub, s, func_task_name, designdir) |
|
assert len(im) == len(vo) |
|
all_dicts.append({k:v for k,v in enumerate(vo)}) |
|
|
|
|
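# Map each image name to its trial indices, kept separate per session:
# image_to_indices[name][ses_idx] -> list of within-session trial indices.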
|
image_to_indices = defaultdict(lambda: [[] for _ in range(len(ses_list))]) |
|
for ses_idx, idx_to_name in enumerate(all_dicts): |
|
for idx, name in idx_to_name.items(): |
|
image_to_indices[name][ses_idx].append(idx) |
|
|
|
image_to_indices = dict(image_to_indices) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if sub == 'sub-005' and len(ses_list) > 1:
    # Concatenate sessions: shift each session's trial indices by a fixed
    # per-session offset (693 valid trials per session for sub-005).
    session_length = 693
    for image, session_indices_list in image_to_indices.items():
        image_to_indices[image] = [
            [i + ses_idx * session_length for i in indices]
            for ses_idx, indices in enumerate(session_indices_list)
        ]

    # sanity check: the largest concatenated index is the final trial overall
    import itertools
    flat_indices = list(itertools.chain.from_iterable(
        itertools.chain.from_iterable(image_to_indices.values())))
    assert max(flat_indices) == (len(ses_list) * session_length) - 1
|
|
|
|
|
|
|
|
|
|
|
if resample_voxel_size: |
|
from nilearn.masking import apply_mask, unmask |
|
ref_name = f'{glmsingle_path}/boldref_resampled.nii.gz' |
|
omat_name = f'{glmsingle_path}/boldref_omat' |
|
|
|
|
|
|
|
|
|
|
|
from nilearn.plotting import plot_roi |
|
|
|
print('loading brain mask') |
|
avg_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_brain.nii.gz') |
|
final_mask = nib.load('/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/sub-005_final_mask.nii.gz') |
|
|
|
|
|
dimsize=avg_mask.header.get_zooms() |
|
affine_mat = avg_mask.affine |
|
brain=avg_mask.get_fdata() |
|
xyz=brain.shape |
|
|
|
print('Mask dimensions:', dimsize) |
|
print('') |
|
print('Affine:') |
|
print(affine_mat) |
|
print('') |
|
print(f'There are {int(np.sum(brain))} voxels in the included brain mask\n') |
|
|
|
plot_roi(final_mask, bg_img=avg_mask) |
|
plt.show() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Precomputed union ROI mask over ses-01 and ses-02 (boolean vector over
# final-brain-mask voxels)
path = '/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_task-C/union_mask_from_ses-01-02.npy'

union_mask = np.load(path)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
ses_mask = [] |
|
|
|
for s in ses_list: |
|
ses_mask_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C/sub-005_{s}_task-C_brain.nii.gz' |
|
ses_mask.append(nib.load(ses_mask_path)) |
|
|
|
assert np.all(ses_mask[-1].affine == final_mask.affine) |
|
assert np.all(ses_mask[-1].shape == final_mask.shape) |
|
|
|
|
|
|
|
|
|
|
|
ses_vox = [] |
|
vox = None |
|
needs_postprocessing = False |
|
params = (session, ses_list, remove_close_to_MST, image_names, remove_random_n, vox_idx) |
|
|
|
if resample_post_glmsingle:
    glm_save_path_resampled = f"{glmsingle_path}/vox_resampled.nii.gz"
    if load_from_resampled_file:
|
|
|
vox = nib.load(glm_save_path_resampled) |
|
else: |
|
|
|
assert os.path.exists(ref_name) and os.path.exists(omat_name), "need to generate the boldref and omat separately since we don't have access to the functional data here; either do so using flirt on the command line or copy over the glmsingle resampled outputs" |
|
vox = load_preprocess_betas(orig_glmsingle_path, *params) |
|
vox = resample_betas(orig_glmsingle_path, sub, session, task_name, vox, glmsingle_path, glm_save_path_resampled, ref_name, omat_name) |
|
needs_postprocessing = True |
|
|
|
if vox is None:
    # Load each session's single-trial betas, project them into the shared
    # final-mask space, then concatenate sessions along the trial axis.
    for i, s in enumerate(ses_list):
        ses_vox_path = f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_sub-005_{s}_task-C'
        assert os.path.exists(ses_vox_path)
        ses_vox.append(load_preprocess_betas(ses_vox_path, *params))
        v = nilearn.masking.unmask(ses_vox[i], ses_mask[i])     # betas -> volume (session mask)
        ses_vox[i] = nilearn.masking.apply_mask(v, final_mask)  # volume -> betas (final mask)
    vox = np.concatenate(ses_vox)
    print("applied final brain mask")
    print(vox.shape)
    vox = vox[:, union_mask]  # restrict to the union ROI
    print("applied union roi mask")
    print(vox.shape)
|
|
|
|
|
if needs_postprocessing:
|
vox = apply_mask(vox, avg_mask) |
|
vox = vox.reshape(-1, vox.shape[-1]) |
|
print(vox.shape) |
|
|
|
assert len(vox) == len(image_idx) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
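# Keep only the first two repeats of each image so every row has equal length.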
|
pairs_homog = np.array([[p[0], p[1]] for p in pairs]) |
|
|
|
|
|
|
|
|
|
|
|
# Repeat reliability: correlate voxel patterns across repeats of the same image
# ("same") vs. across repeats of different images ("diff").
same_corrs = []
diff_corrs = []
for isamp, samp in enumerate(vox[pairs_homog]):
    avg_same_img = []
    for i in range(samp.shape[0]):
        for j in range(i + 1, samp.shape[0]):
            avg_same_img.append(np.corrcoef(samp[i, :], samp[j, :])[0, 1])
    same_corrs.append(np.mean(avg_same_img))

    avg_diff_img = []
    for isamp_j, samp_j in enumerate(vox[pairs_homog]):
        if isamp_j != isamp:
            for i in range(samp_j.shape[0]):
                for j in range(i + 1, samp_j.shape[0]):
                    avg_diff_img.append(np.corrcoef(samp[i, :], samp_j[j, :])[0, 1])
    diff_corrs.append(np.mean(avg_diff_img))
|
|
|
|
|
print(len(same_corrs), len(diff_corrs)) |
|
same_corrs = np.array(same_corrs) |
|
diff_corrs = np.array(diff_corrs) |
|
|
|
|
|
plt.figure(figsize=(5,4)) |
|
plt.title(f"{sub}_{session} same/diff Pearson corr.") |
|
plt.plot(np.sort(same_corrs),c='blue',label='same') |
|
plt.plot(np.sort(diff_corrs),c='cyan',label='diff') |
|
plt.axhline(0,c='k',ls='--') |
|
plt.legend() |
|
plt.xlabel("sample") |
|
plt.ylabel("Pearson R") |
|
plt.show() |
|
|
|
|
|
|
|
|
|
|
|
vox_pairs = utils.zscore(vox[pairs_homog]) |
|
plt.figure(figsize=(5,4)) |
|
plt.title(f"{sub}_{session} same minus diff difference Pearson corr.") |
|
plt.plot(np.sort(same_corrs) - np.sort(diff_corrs),c='cyan',label='difference') |
|
plt.axhline(0,c='k',ls='--') |
|
plt.legend() |
|
plt.xlabel("sample") |
|
plt.ylabel("Pearson R") |
|
plt.show() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
utils.seed_everything(seed) |
|
|
|
if train_test_split == 'orig': |
|
|
|
|
|
imageTrain = np.arange(len(images)) |
|
train_image_indices = np.array([item for item in imageTrain if item not in pairs.flatten()]) |
|
test_image_indices = pairs |
|
print(len(train_image_indices), len(test_image_indices)) |
|
assert len(train_image_indices) + len(test_image_indices) == len(image_idx) |
|
elif train_test_split == 'MST': |
|
|
|
|
|
MST_idx = np.array([v for k,v in image_to_indices.items() if 'MST_pairs' in k]) |
|
non_MST_idx = [v for k,v in image_to_indices.items() if 'MST_pairs' not in k] |
|
non_MST_idx = np.array([z for y in non_MST_idx for x in y for z in x]) |
|
train_image_indices = non_MST_idx |
|
test_image_indices = MST_idx.flatten() |
|
print(len(train_image_indices), len(test_image_indices)) |
|
assert len(train_image_indices) + len(test_image_indices) == len(vox) |
|
elif train_test_split == 'unique': |
|
imageTest = np.arange(len(images)) |
|
train_image_indices = pairs.flatten() |
|
test_image_indices = np.array([item for item in imageTest if item not in pairs.flatten()]) |
|
print(len(train_image_indices), len(test_image_indices)) |
|
assert len(train_image_indices) + len(test_image_indices) == len(image_idx) |
|
else: |
|
raise Exception("invalid train_test_split") |
|
|
|
|
|
|
|
for i in train_image_indices: |
|
assert i not in test_image_indices |
|
|
|
|
|
|
|
|
|
|
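# Z-score each session separately using training-trial statistics. NOTE: this
# assumes vox is ordered ses-01 then ses-02 and that training trials are split
# evenly between the two sessions, so ses_split lands on the session boundary.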
|
ses_split = vox[train_image_indices].shape[0] // 2 |
|
|
|
train_mean_s1 = np.mean(vox[train_image_indices][:ses_split], axis=0) |
|
train_std_s1 = np.std(vox[train_image_indices][:ses_split], axis=0) |
|
train_mean_s2 = np.mean(vox[train_image_indices][ses_split:], axis=0) |
|
train_std_s2 = np.std(vox[train_image_indices][ses_split:], axis=0) |
|
|
|
|
|
vox[:ses_split] = utils.zscore(vox[:ses_split],train_mean=train_mean_s1,train_std=train_std_s1) |
|
vox[ses_split:] = utils.zscore(vox[ses_split:],train_mean=train_mean_s2,train_std=train_std_s2) |
|
|
|
print("voxels have been zscored") |
|
print("ses-01:", vox[:ses_split,0].mean(), vox[:ses_split,0].std()) |
|
print("ses-02:", vox[ses_split:,0].mean(), vox[ses_split:,0].std()) |
|
print("vox", vox.shape) |
|
|
|
|
|
|
|
|
|
|
|
|
|
train_test_mean_s1 = np.mean(vox[:ses_split], axis=0) |
|
train_test_std_s1 = np.std(vox[:ses_split], axis=0) |
|
train_test_mean_s2 = np.mean(vox[ses_split:], axis=0) |
|
train_test_std_s2 = np.std(vox[ses_split:], axis=0) |
|
print(train_test_mean_s1.shape) |
|
assert np.all(train_test_mean_s1.shape == train_test_std_s1.shape) |
|
assert np.all(train_test_mean_s1.shape == train_test_mean_s2.shape) |
|
assert np.all(train_test_mean_s1.shape == train_test_std_s2.shape) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
images = torch.Tensor(images) |
|
vox = torch.Tensor(vox) |
|
assert len(images) == len(vox) |
|
|
|
|
|
|
|
|
|
|
|
|
|
from accelerate import Accelerator, DeepSpeedPlugin |
|
|
|
local_rank = os.getenv('RANK')  # global rank from the launcher; single-node, so it doubles as local rank
if local_rank is None:
    local_rank = 0
else:
    local_rank = int(local_rank)
print("LOCAL RANK ", local_rank)
|
|
|
data_type = torch.float32 |
|
|
|
accelerator = Accelerator(split_batches=False) |
|
batch_size = 8 |
|
|
|
|
|
|
|
|
|
|
|
print("PID of this process =",os.getpid()) |
|
device = accelerator.device |
|
print("device:",device) |
|
world_size = accelerator.state.num_processes |
|
distributed = not accelerator.state.distributed_type == 'NO' |
|
num_devices = torch.cuda.device_count() |
|
global_batch_size = batch_size * num_devices |
|
print("global_batch_size", global_batch_size) |
|
if num_devices==0 or not distributed: num_devices = 1 |
|
num_workers = num_devices |
|
print(accelerator.state) |
|
|
|
|
|
if accelerator.mixed_precision == "bf16": |
|
data_type = torch.bfloat16 |
|
elif accelerator.mixed_precision == "fp16": |
|
data_type = torch.float16 |
|
else: |
|
data_type = torch.float32 |
|
|
|
print("distributed =",distributed, "num_devices =", num_devices, "local rank =", local_rank, "world size =", world_size, "data_type =", data_type) |
|
print = accelerator.print |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if utils.is_interactive(): |
|
model_name = 'testing_MST' |
|
print("model_name:", model_name) |
|
|
|
|
|
|
|
|
|
|
|
jupyter_args = f"--data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 \ |
|
--model_name={model_name} \ |
|
--no-multi_subject --subj=1 --batch_size={batch_size} \ |
|
--hidden_dim=1024 --clip_scale=1. \ |
|
--no-blurry_recon --blur_scale=.5 \ |
|
--no-use_prior --prior_scale=30 \ |
|
--n_blocks=4 --max_lr=3e-4 --mixup_pct=.33 --num_epochs=30 --no-use_image_aug \ |
|
--ckpt_interval=999 --no-ckpt_saving --new_test \ |
|
--multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep" |
|
print(jupyter_args) |
|
jupyter_args = jupyter_args.split() |
|
|
|
|
|
|
|
|
|
|
|
parser = argparse.ArgumentParser(description="Model Training Configuration") |
|
parser.add_argument( |
|
"--model_name", type=str, default="testing", |
|
help="name of model, used for ckpt saving and wandb logging (if enabled)", |
|
) |
|
parser.add_argument( |
|
"--data_path", type=str, default="/weka/proj-fmri/shared/natural-scenes-dataset", |
|
help="Path to where NSD data is stored / where to download it to", |
|
) |
|
parser.add_argument( |
|
"--subj",type=int, default=1, choices=[1,2,3,4,5,6,7,8], |
|
help="Validate on which subject?", |
|
) |
|
parser.add_argument( |
|
"--multisubject_ckpt", type=str, default=None, |
|
help="Path to pre-trained multisubject model to finetune a single subject from. multisubject must be False.", |
|
) |
|
parser.add_argument( |
|
"--num_sessions", type=int, default=0, |
|
help="Number of training sessions to include (if multi_subject, this variable doesnt matter)", |
|
) |
|
parser.add_argument( |
|
"--use_prior",action=argparse.BooleanOptionalAction,default=False, |
|
help="whether to train diffusion prior (True) or just rely on retrieval part of the pipeline (False)", |
|
) |
|
parser.add_argument( |
|
"--batch_size", type=int, default=32, |
|
help="Batch size can be increased by 10x if only training v2c and not diffusion diffuser", |
|
) |
|
parser.add_argument( |
|
"--wandb_log",action=argparse.BooleanOptionalAction,default=False, |
|
help="whether to log to wandb", |
|
) |
|
parser.add_argument( |
|
"--resume_from_ckpt",action=argparse.BooleanOptionalAction,default=False, |
|
help="if not using wandb and want to resume from a ckpt", |
|
) |
|
parser.add_argument( |
|
"--wandb_project",type=str,default="stability", |
|
help="wandb project name", |
|
) |
|
parser.add_argument( |
|
"--mixup_pct",type=float,default=.33, |
|
help="proportion of way through training when to switch from BiMixCo to SoftCLIP", |
|
) |
|
parser.add_argument( |
|
"--low_mem",action=argparse.BooleanOptionalAction,default=False, |
|
help="whether to preload images to cpu to speed things up but consume more memory", |
|
) |
|
parser.add_argument( |
|
"--blurry_recon",action=argparse.BooleanOptionalAction,default=True, |
|
help="whether to output blurry reconstructions", |
|
) |
|
parser.add_argument( |
|
"--blur_scale",type=float,default=.5, |
|
help="multiply loss from blurry recons by this number", |
|
) |
|
parser.add_argument( |
|
"--clip_scale",type=float,default=1., |
|
help="multiply contrastive loss by this number", |
|
) |
|
parser.add_argument( |
|
"--prior_scale",type=float,default=30, |
|
help="multiply diffusion prior loss by this", |
|
) |
|
parser.add_argument( |
|
"--use_image_aug",action=argparse.BooleanOptionalAction,default=True, |
|
help="whether to use image augmentation", |
|
) |
|
parser.add_argument( |
|
"--num_epochs",type=int,default=120, |
|
help="number of epochs of training", |
|
) |
|
parser.add_argument( |
|
"--multi_subject",action=argparse.BooleanOptionalAction,default=False, |
|
) |
|
parser.add_argument( |
|
"--new_test",action=argparse.BooleanOptionalAction,default=True, |
|
) |
|
parser.add_argument( |
|
"--n_blocks",type=int,default=2, |
|
) |
|
parser.add_argument( |
|
"--hidden_dim",type=int,default=1024, |
|
) |
|
parser.add_argument( |
|
"--seq_past",type=int,default=0, |
|
) |
|
parser.add_argument( |
|
"--seq_future",type=int,default=0, |
|
) |
|
parser.add_argument( |
|
"--lr_scheduler_type",type=str,default='cycle',choices=['cycle','linear'], |
|
) |
|
parser.add_argument( |
|
"--ckpt_saving",action=argparse.BooleanOptionalAction,default=True, |
|
) |
|
parser.add_argument( |
|
"--ckpt_interval",type=int,default=5, |
|
help="save backup ckpt and reconstruct every x epochs", |
|
) |
|
parser.add_argument( |
|
"--seed",type=int,default=42, |
|
) |
|
parser.add_argument( |
|
"--max_lr",type=float,default=3e-4, |
|
) |
|
|
|
if utils.is_interactive(): |
|
args = parser.parse_args(jupyter_args) |
|
else: |
|
args = parser.parse_args() |
|
|
|
|
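# Promote all parsed args to module-level globals (model_name, batch_size, ...).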
|
for attribute_name in vars(args).keys(): |
|
globals()[attribute_name] = getattr(args, attribute_name) |
|
|
|
outdir = os.path.abspath(f'/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/{model_name}') |
|
if not os.path.exists(outdir) and ckpt_saving: |
|
os.makedirs(outdir,exist_ok=True) |
|
|
|
if use_image_aug or blurry_recon: |
|
import kornia |
|
import kornia.augmentation as K |
|
from kornia.augmentation.container import AugmentationSequential |
|
if use_image_aug: |
|
img_augment = AugmentationSequential( |
|
kornia.augmentation.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.3), |
|
same_on_batch=False, |
|
data_keys=["input"], |
|
) |
|
|
|
blur_augment = K.RandomGaussianBlur(kernel_size=(21, 21), sigma=(51.0, 51.0), p=1.) |
|
|
|
if multi_subject: |
|
subj_list = np.arange(1,9) |
|
subj_list = subj_list[subj_list != subj] |
|
else: |
|
subj_list = [subj] |
|
|
|
print("subj_list", subj_list, "num_sessions", num_sessions) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if ckpt_saving: |
|
|
|
if 'MST' in model_name: |
|
eval_dir = os.environ["eval_dir"] |
|
print('saving MST info in', eval_dir) |
|
|
|
if not os.path.exists(eval_dir): |
|
os.mkdir(eval_dir) |
|
|
|
np.save(f"{eval_dir}/MST_ID.npy", MST_ID) |
|
np.save(f"{eval_dir}/MST_pairmate_indices.npy", MST_pairmate_indices) |
|
|
|
if remove_random_n: |
|
np.save(f"{eval_dir}/imgs_to_remove.npy", imgs_to_remove) |
|
|
|
np.save(f"{eval_dir}/train_image_indices.npy", train_image_indices) |
|
np.save(f"{eval_dir}/test_image_indices.npy", test_image_indices) |
|
np.save(f"{eval_dir}/images.npy", images) |
|
np.save(f"{eval_dir}/vox.npy", vox) |
|
|
|
np.save(f'{eval_dir}/train_test_mean_s1.npy', train_test_mean_s1) |
|
np.save(f'{eval_dir}/train_test_std_s1.npy', train_test_std_s1) |
|
np.save(f'{eval_dir}/train_test_mean_s2.npy', train_test_mean_s2) |
|
np.save(f'{eval_dir}/train_test_std_s2.npy', train_test_std_s2) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def my_split_by_node(urls): return urls |
|
num_voxels_list = [] |
|
|
|
if multi_subject: |
|
nsessions_allsubj=np.array([40, 40, 32, 30, 40, 32, 40, 30]) |
|
num_samples_per_epoch = (750*40) // num_devices |
|
else: |
|
|
|
num_samples_per_epoch = len(train_image_indices) |
|
|
|
print("dividing batch size by subj_list, which will then be concatenated across subj during training...") |
|
batch_size = batch_size // len(subj_list) |
|
|
|
num_iterations_per_epoch = num_samples_per_epoch // (batch_size*len(subj_list)) |
|
|
|
print("batch_size =", batch_size, "num_iterations_per_epoch =",num_iterations_per_epoch, "num_samples_per_epoch =",num_samples_per_epoch) |
|
|
|
|
|
|
|
|
|
|
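# The datasets hold trial *indices*; images and voxels are gathered inside the
# training loop. train_dl is rebuilt per subject below (a single subject here).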
|
train_data = {} |
|
train_dl = {} |
|
|
|
train_data[f'subj0{subj}'] = torch.utils.data.TensorDataset(torch.tensor(train_image_indices)) |
|
test_data = torch.utils.data.TensorDataset(torch.tensor(test_image_indices)) |
|
|
|
|
|
|
|
|
|
|
|
num_voxels = {} |
|
voxels = {} |
|
for s in subj_list: |
|
print(f"Training with {num_sessions} sessions") |
|
train_dl = torch.utils.data.DataLoader(train_data[f'subj0{s}'], batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True) |
|
|
|
num_voxels_list.append(vox[0].shape[-1]) |
|
num_voxels[f'subj0{s}'] = vox[0].shape[-1] |
|
voxels[f'subj0{s}'] = vox |
|
print(f"num_voxels for subj0{s}: {num_voxels[f'subj0{s}']}") |
|
|
|
print("Loaded all subj train dls and vox!\n") |
|
|
|
|
|
if multi_subject: |
|
subj = subj_list[0] |
|
test_dl = torch.utils.data.DataLoader(test_data, batch_size=24, shuffle=False, drop_last=True, pin_memory=True) |
|
|
|
print(f"Loaded test dl for subj{subj}!\n") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
sys.path.append('generative_models/') |
|
import sgm |
|
from generative_models.sgm.modules.encoders.modules import FrozenOpenCLIPImageEmbedder |
|
|
|
|
|
|
|
try:  # reuse an existing embedder if this cell was already run
    print(clip_img_embedder)
except NameError:
|
clip_img_embedder = FrozenOpenCLIPImageEmbedder( |
|
arch="ViT-bigG-14", |
|
version="laion2b_s39b_b160k", |
|
output_tokens=True, |
|
only_tokens=True, |
|
) |
|
clip_img_embedder.to(device) |
|
clip_seq_dim = 256 |
|
clip_emb_dim = 1664 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
model = utils.prepare_model_and_training( |
|
num_voxels_list=num_voxels_list, |
|
n_blocks=n_blocks, |
|
hidden_dim=hidden_dim, |
|
clip_emb_dim=clip_emb_dim, |
|
clip_seq_dim=clip_seq_dim, |
|
use_prior=use_prior, |
|
clip_scale=clip_scale |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
b = torch.randn((2,1,num_voxels_list[0])) |
|
print(b.shape, model.ridge(b,0).shape) |
|
|
|
|
|
|
|
|
|
|
|
|
|
b = torch.randn((2,1,hidden_dim)) |
|
print("b.shape",b.shape) |
|
|
|
backbone_, clip_, blur_ = model.backbone(b) |
|
print(backbone_.shape, clip_.shape, blur_[0].shape, blur_[1].shape) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
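# Optional diffusion prior that maps backbone outputs to CLIP image embeddings.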
|
if use_prior: |
|
from models import * |
|
|
|
|
|
    out_dim = clip_emb_dim      # 1664
    depth = 6
    dim_head = 52
    heads = clip_emb_dim // 52  # 1664 / 52 = 32 attention heads
    timesteps = 100
|
|
|
prior_network = VersatileDiffusionPriorNetwork( |
|
dim=out_dim, |
|
depth=depth, |
|
dim_head=dim_head, |
|
heads=heads, |
|
causal=False, |
|
num_tokens = clip_seq_dim, |
|
learned_query_mode="pos_emb" |
|
) |
|
|
|
model.diffusion_prior = BrainDiffusionPrior( |
|
net=prior_network, |
|
image_embed_dim=out_dim, |
|
condition_on_text_encodings=False, |
|
timesteps=timesteps, |
|
cond_drop_prob=0.2, |
|
image_embed_scale=None, |
|
) |
|
|
|
utils.count_params(model.diffusion_prior) |
|
utils.count_params(model) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
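# Standard AdamW grouping: no weight decay on biases or LayerNorm parameters.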
|
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] |
|
|
|
opt_grouped_parameters = [ |
|
{'params': [p for n, p in model.ridge.named_parameters()], 'weight_decay': 1e-2}, |
|
{'params': [p for n, p in model.backbone.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2}, |
|
{'params': [p for n, p in model.backbone.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}, |
|
] |
|
|
|
|
|
if use_prior: |
|
opt_grouped_parameters.extend([ |
|
{'params': [p for n, p in model.diffusion_prior.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2}, |
|
{'params': [p for n, p in model.diffusion_prior.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} |
|
]) |
|
|
|
optimizer = torch.optim.AdamW(opt_grouped_parameters, lr=max_lr) |
|
|
|
if lr_scheduler_type == 'linear': |
|
lr_scheduler = torch.optim.lr_scheduler.LinearLR( |
|
optimizer, |
|
total_iters=int(np.floor(num_epochs*num_iterations_per_epoch)), |
|
last_epoch=-1 |
|
) |
|
elif lr_scheduler_type == 'cycle': |
|
if num_iterations_per_epoch==0: |
|
num_iterations_per_epoch=1 |
|
total_steps=int(np.floor(num_epochs*num_iterations_per_epoch)) |
|
print("total_steps", total_steps) |
|
lr_scheduler = torch.optim.lr_scheduler.OneCycleLR( |
|
optimizer, |
|
max_lr=max_lr, |
|
total_steps=total_steps, |
|
final_div_factor=1000, |
|
last_epoch=-1, pct_start=2/num_epochs |
|
) |
|
|
|
def save_ckpt(tag): |
|
ckpt_path = outdir+f'/{tag}.pth' |
|
if accelerator.is_main_process: |
|
unwrapped_model = accelerator.unwrap_model(model) |
|
torch.save({ |
|
'epoch': epoch, |
|
'model_state_dict': unwrapped_model.state_dict(), |
|
'optimizer_state_dict': optimizer.state_dict(), |
|
'lr_scheduler': lr_scheduler.state_dict(), |
|
'train_losses': losses, |
|
'test_losses': test_losses, |
|
'lrs': lrs, |
|
}, ckpt_path) |
|
print(f"\n---saved {outdir}/{tag} ckpt!---\n") |
|
|
|
def load_ckpt(tag,load_lr=True,load_optimizer=True,load_epoch=True,strict=True,outdir=outdir,multisubj_loading=False):
    print(f"\n---loading {outdir}/{tag}.pth ckpt---\n")
    checkpoint = torch.load(outdir+f'/{tag}.pth', map_location='cpu')
|
state_dict = checkpoint['model_state_dict'] |
|
if multisubj_loading: |
|
state_dict.pop('ridge.linears.0.weight',None) |
|
model.load_state_dict(state_dict, strict=strict) |
|
if load_epoch: |
|
globals()["epoch"] = checkpoint['epoch'] |
|
print("Epoch",epoch) |
|
if load_optimizer: |
|
optimizer.load_state_dict(checkpoint['optimizer_state_dict']) |
|
if load_lr: |
|
lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) |
|
del checkpoint |
|
|
|
print("\nDone with model preparations!") |
|
num_params = utils.count_params(model) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if local_rank==0 and wandb_log:
    import wandb
|
|
|
wandb_project = 'rtmindeye' |
|
print(f"wandb {wandb_project} run {model_name}") |
|
|
|
|
|
wandb_config = { |
|
"model_name": model_name, |
|
"global_batch_size": global_batch_size, |
|
"batch_size": batch_size, |
|
"num_epochs": num_epochs, |
|
"num_sessions": num_sessions, |
|
"num_params": num_params, |
|
"clip_scale": clip_scale, |
|
"prior_scale": prior_scale, |
|
"blur_scale": blur_scale, |
|
"use_image_aug": use_image_aug, |
|
"max_lr": max_lr, |
|
"mixup_pct": mixup_pct, |
|
"num_samples_per_epoch": num_samples_per_epoch, |
|
"ckpt_interval": ckpt_interval, |
|
"ckpt_saving": ckpt_saving, |
|
"seed": seed, |
|
"distributed": distributed, |
|
"num_devices": num_devices, |
|
"world_size": world_size, |
|
} |
|
print("wandb_config:\n", wandb_config) |
|
print("wandb_id:", model_name) |
|
|
|
|
|
wandb.init( |
|
id=model_name, |
|
project=wandb_project, |
|
name=model_name, |
|
config=wandb_config, |
|
resume="allow", |
|
save_code=True, |
|
) |
|
|
|
|
|
slurm_job_id = utils.get_slurm_job() |
|
slurm_array_id = seed |
|
|
|
|
|
log_dir = "slurms" |
|
log_files = [ |
|
f"{log_dir}/{slurm_job_id}_{slurm_array_id}.out", |
|
f"{log_dir}/{slurm_job_id}_{slurm_array_id}.err", |
|
] |
|
|
|
|
|
for log_file in log_files: |
|
wait_time = 0 |
|
while not os.path.exists(log_file) and wait_time < 60: |
|
time.sleep(5) |
|
wait_time += 5 |
|
|
|
|
|
artifact = wandb.Artifact(f"slurm_logs_{slurm_job_id}_{slurm_array_id}", type="logs") |
|
for log_file in log_files: |
|
if os.path.exists(log_file): |
|
artifact.add_file(log_file) |
|
|
|
wandb.log_artifact(artifact) |
|
else: |
|
wandb_log = False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
epoch = 0 |
|
losses, test_losses, lrs = [], [], [] |
|
best_test_loss = 1e9 |
|
torch.cuda.empty_cache() |
|
|
|
|
|
|
|
|
|
|
|
|
|
if multisubject_ckpt is not None and not resume_from_ckpt: |
|
load_ckpt("last",outdir=multisubject_ckpt,load_lr=False,load_optimizer=False,load_epoch=False,strict=False,multisubj_loading=True) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
model, optimizer, train_dl, lr_scheduler = accelerator.prepare(model, optimizer, train_dl, lr_scheduler) |
|
|
|
|
|
|
|
|
|
|
|
|
|
print(f"{model_name} starting with epoch {epoch} / {num_epochs}") |
|
progress_bar = tqdm(range(epoch,num_epochs), ncols=1200, disable=(local_rank!=0)) |
|
test_image, test_voxel = None, None |
|
mse = nn.MSELoss() |
|
l1 = nn.L1Loss() |
|
soft_loss_temps = utils.cosine_anneal(0.004, 0.0075, num_epochs - int(mixup_pct * num_epochs)) |
|
skip_train = epoch >= (num_epochs - 1)  # resuming at the final epoch leaves nothing to train
|
|
|
for epoch in progress_bar: |
|
model.train() |
|
|
|
fwd_percent_correct = 0. |
|
bwd_percent_correct = 0. |
|
test_fwd_percent_correct = 0. |
|
test_bwd_percent_correct = 0. |
|
|
|
recon_cossim = 0. |
|
test_recon_cossim = 0. |
|
recon_mse = 0. |
|
test_recon_mse = 0. |
|
|
|
loss_clip_total = 0. |
|
loss_blurry_total = 0. |
|
loss_blurry_cont_total = 0. |
|
test_loss_clip_total = 0. |
|
|
|
loss_prior_total = 0. |
|
test_loss_prior_total = 0. |
|
|
|
blurry_pixcorr = 0. |
|
test_blurry_pixcorr = 0. |
|
|
|
|
|
for train_i, behav in enumerate(train_dl): |
|
with torch.cuda.amp.autocast(dtype=data_type): |
|
optimizer.zero_grad() |
|
loss = 0. |
|
|
|
behav = behav[0] |
|
|
|
image = images[behav.long().cpu()].to(device) |
|
voxel = vox[behav.long().cpu()] |
|
|
|
voxel = torch.Tensor(voxel).unsqueeze(1).to(device) |
|
|
|
if use_image_aug: |
|
image = img_augment(image) |
|
|
|
clip_target = clip_img_embedder(image) |
|
assert not torch.any(torch.isnan(clip_target)) |
|
|
|
if epoch < int(mixup_pct * num_epochs): |
|
voxel, perm, betas, select = utils.mixco(voxel) |
|
|
|
voxel_ridge = model.ridge(voxel,0) |
|
|
|
|
|
backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge) |
|
|
|
if clip_scale>0: |
|
clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1) |
|
clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1) |
|
|
|
if use_prior: |
|
loss_prior, prior_out = model.diffusion_prior(text_embed=backbone, image_embed=clip_target) |
|
loss_prior_total += loss_prior.item() |
|
loss_prior *= prior_scale |
|
loss += loss_prior |
|
|
|
recon_cossim += nn.functional.cosine_similarity(prior_out, clip_target).mean().item() |
|
recon_mse += mse(prior_out, clip_target).item() |
|
|
|
if clip_scale>0: |
|
if epoch < int(mixup_pct * num_epochs): |
|
loss_clip = utils.mixco_nce( |
|
clip_voxels_norm, |
|
clip_target_norm, |
|
temp=.006, |
|
perm=perm, betas=betas, select=select) |
|
else: |
|
epoch_temp = soft_loss_temps[epoch-int(mixup_pct*num_epochs)] |
|
loss_clip = utils.soft_clip_loss( |
|
clip_voxels_norm, |
|
clip_target_norm, |
|
temp=epoch_temp) |
|
|
|
loss_clip_total += loss_clip.item() |
|
loss_clip *= clip_scale |
|
loss += loss_clip |
|
|
|
            if blurry_recon:
                # NOTE: this branch assumes `autoenc` (the SD VAE), `cnx` (a ConvNeXt
                # feature extractor), and normalization tensors `mean`/`std` are
                # defined elsewhere; this script is typically run with --no-blurry_recon.
                image_enc_pred, transformer_feats = blurry_image_enc_

                image_enc = autoenc.encode(2*image-1).latent_dist.mode() * 0.18215
                loss_blurry = l1(image_enc_pred, image_enc)
                loss_blurry_total += loss_blurry.item()

                if epoch < int(mixup_pct * num_epochs):
                    image_enc_shuf = image_enc[perm]
                    betas_shape = [-1] + [1]*(len(image_enc.shape)-1)
                    image_enc[select] = image_enc[select] * betas[select].reshape(*betas_shape) + \
                        image_enc_shuf[select] * (1 - betas[select]).reshape(*betas_shape)

                image_norm = (image - mean)/std
                image_aug = (blur_augment(image) - mean)/std  # blur augmentation defined above
                _, cnx_embeds = cnx(image_norm)
                _, cnx_aug_embeds = cnx(image_aug)

                cont_loss = utils.soft_cont_loss(
                    nn.functional.normalize(transformer_feats.reshape(-1, transformer_feats.shape[-1]), dim=-1),
                    nn.functional.normalize(cnx_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
                    nn.functional.normalize(cnx_aug_embeds.reshape(-1, cnx_embeds.shape[-1]), dim=-1),
                    temp=0.2)
                loss_blurry_cont_total += cont_loss.item()

                loss += (loss_blurry + 0.1*cont_loss) * blur_scale
|
|
|
if clip_scale>0: |
|
|
|
labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) |
|
fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item() |
|
bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item() |
|
|
|
if blurry_recon: |
|
with torch.no_grad(): |
|
|
|
random_samps = np.random.choice(np.arange(len(image)), size=len(image)//5, replace=False) |
|
blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps]/0.18215).sample/ 2 + 0.5).clamp(0,1) |
|
pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images) |
|
blurry_pixcorr += pixcorr.item() |
|
|
|
utils.check_loss(loss) |
|
accelerator.backward(loss) |
|
optimizer.step() |
|
|
|
losses.append(loss.item()) |
|
lrs.append(optimizer.param_groups[0]['lr']) |
|
|
|
if lr_scheduler_type is not None: |
|
lr_scheduler.step() |
|
|
|
if train_i >= num_iterations_per_epoch-1: |
|
break |
|
|
|
model.eval() |
|
logs = {} |
|
|
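    # Evaluate on each held-out MST subset; MST_idx[:, i, j] is assumed to index
    # session i, repeat j of every MST image.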
|
if local_rank == 0: |
|
with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): |
|
for i in range(2): |
|
for j in range(2): |
|
subset_indices = MST_idx[:, i, j].reshape(-1) |
|
subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices)) |
|
subset_dl = torch.utils.data.DataLoader( |
|
subset_dataset, batch_size=len(MST_idx), shuffle=False, |
|
drop_last=True, pin_memory=True |
|
) |
|
|
|
|
|
test_losses = [] |
|
test_loss_clip_total = 0 |
|
test_loss_prior_total = 0 |
|
test_blurry_pixcorr = 0 |
|
test_fwd_percent_correct = 0 |
|
test_bwd_percent_correct = 0 |
|
test_recon_cossim = 0 |
|
test_recon_mse = 0 |
|
|
|
for test_i, behav in enumerate(subset_dl): |
|
behav = behav[0] |
|
loss = 0. |
|
|
|
if behav.ndim > 1: |
|
image = images[behav[:, 0].long().cpu()].to(device) |
|
voxel = vox[behav.long().cpu()].mean(1) |
|
else: |
|
image = images[behav.long().cpu()].to(device) |
|
voxel = vox[behav.long().cpu()] |
|
|
|
voxel = torch.Tensor(voxel).unsqueeze(1).to(device) |
|
|
|
clip_img_embedder = clip_img_embedder.to(device) |
|
clip_target = clip_img_embedder(image.float()) |
|
|
|
voxel_ridge = model.ridge(voxel, 0) |
|
backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge) |
|
|
|
if clip_scale > 0: |
|
clip_voxels_norm = nn.functional.normalize(clip_voxels.flatten(1), dim=-1) |
|
clip_target_norm = nn.functional.normalize(clip_target.flatten(1), dim=-1) |
|
|
|
random_samps = np.random.choice(np.arange(len(image)), size=len(image) // 5, replace=False) |
|
|
|
if use_prior: |
|
loss_prior, contaminated_prior_out = model.diffusion_prior( |
|
text_embed=backbone[random_samps], image_embed=clip_target[random_samps]) |
|
test_loss_prior_total += loss_prior.item() |
|
loss_prior *= prior_scale |
|
loss += loss_prior |
|
|
|
if clip_scale > 0: |
|
loss_clip = utils.soft_clip_loss( |
|
clip_voxels_norm, |
|
clip_target_norm, |
|
temp=0.006 |
|
) |
|
test_loss_clip_total += loss_clip.item() |
|
loss_clip *= clip_scale |
|
loss += loss_clip |
|
|
|
if blurry_recon: |
|
image_enc_pred, _ = blurry_image_enc_ |
|
blurry_recon_images = (autoenc.decode(image_enc_pred[random_samps] / 0.18215).sample / 2 + 0.5).clamp(0, 1) |
|
pixcorr = utils.pixcorr(image[random_samps], blurry_recon_images) |
|
test_blurry_pixcorr += pixcorr.item() |
|
|
|
if clip_scale > 0: |
|
labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) |
|
test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item() |
|
test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item() |
|
|
|
utils.check_loss(loss) |
|
test_losses.append(loss.item()) |
|
|
|
logs.update({ |
|
f"subset_{i}_{j}_test/loss": np.mean(test_losses), |
|
f"subset_{i}_{j}_test/loss_clip_total": test_loss_clip_total / (test_i + 1), |
|
f"subset_{i}_{j}_test/loss_prior": test_loss_prior_total / (test_i + 1), |
|
f"subset_{i}_{j}_test/blurry_pixcorr": test_blurry_pixcorr / (test_i + 1), |
|
f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1), |
|
f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1), |
|
}) |
|
print(f"--- Subset ({i},{j}) ---") |
|
for k, v in logs.items(): |
|
if f"subset_{i}_{j}" in k: |
|
print(f"{k}: {v:.4f}") |
|
|
|
|
|
logs.update({ |
|
"train/loss": np.mean(losses[-(train_i+1):]), |
|
"train/lr": lrs[-1], |
|
"train/num_steps": len(losses), |
|
"train/fwd_pct_correct": fwd_percent_correct / (train_i + 1), |
|
"train/bwd_pct_correct": bwd_percent_correct / (train_i + 1), |
|
"train/loss_clip_total": loss_clip_total / (train_i + 1), |
|
"train/loss_blurry_total": loss_blurry_total / (train_i + 1), |
|
"train/loss_blurry_cont_total": loss_blurry_cont_total / (train_i + 1), |
|
"train/blurry_pixcorr": blurry_pixcorr / (train_i + 1), |
|
"train/recon_cossim": recon_cossim / (train_i + 1), |
|
"train/recon_mse": recon_mse / (train_i + 1), |
|
"train/loss_prior": loss_prior_total / (train_i + 1), |
|
}) |
|
|
|
|
|
|
|
if (epoch == num_epochs-1) or (epoch % ckpt_interval == 0): |
|
if blurry_recon: |
|
image_enc = autoenc.encode(2*image[:4]-1).latent_dist.mode() * 0.18215 |
|
|
|
fig, axes = plt.subplots(1, 8, figsize=(10, 4)) |
|
jj=-1 |
|
for j in [0,1,2,3]: |
|
jj+=1 |
|
axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1))) |
|
axes[jj].axis('off') |
|
jj+=1 |
|
axes[jj].imshow(utils.torch_to_Image((autoenc.decode(image_enc_pred[[j]]/0.18215).sample / 2 + 0.5).clamp(0,1))) |
|
axes[jj].axis('off') |
|
plt.show() |
|
|
|
progress_bar.set_postfix(**logs) |
|
|
|
if wandb_log: wandb.log(logs) |
|
|
|
|
|
if (ckpt_saving) and (epoch % ckpt_interval == 0): |
|
        save_ckpt('last')
|
|
|
|
|
accelerator.wait_for_everyone() |
|
torch.cuda.empty_cache() |
|
|
|
print("\n===Finished!===\n") |
|
if ckpt_saving: |
|
    save_ckpt('last')
|
|
|
|
|
|
|
|
|
|
|
print(len(test_data))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
x = [im for im in image_names if str(im) not in ('blank.jpg', 'nan')] |
|
assert len(image_idx) == len(x) |
|
pairs = [] |
|
for i, p in enumerate(MST_pairmate_names): |
|
assert p[0] != p[1] |
|
pairs.append([utils.find_all_indices(x,p[0]), utils.find_all_indices(x,p[1])]) |
|
|
|
pairs = np.array(pairs) |
|
|
|
|
|
|
|
|
|
|
|
print(pairs.shape)
|
|
|
|
|
|
|
|
|
|
|
model.eval() |
|
logs = {} |
|
if local_rank == 0: |
|
with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): |
|
for i in range(2): |
|
for j in range(2): |
|
subset_indices = MST_idx[:, i, j].reshape(-1) |
|
subset_dataset = torch.utils.data.TensorDataset(torch.tensor(subset_indices)) |
|
subset_dl = torch.utils.data.DataLoader( |
|
subset_dataset, batch_size=len(MST_idx), shuffle=False, |
|
drop_last=True, pin_memory=True |
|
) |
|
|
|
|
|
test_fwd_percent_correct = 0 |
|
test_bwd_percent_correct = 0 |
|
|
|
for test_i, behav in enumerate(subset_dl): |
|
behav = behav[0] |
|
loss = 0. |
|
image = images[behav.long().cpu()].to(device) |
|
voxel = vox[behav.long().cpu()] |
|
voxel = torch.Tensor(voxel).unsqueeze(1).to(device) |
|
clip_img_embedder = clip_img_embedder.to(device) |
|
clip_target = clip_img_embedder(image.float()) |
|
|
|
voxel_ridge = model.ridge(voxel, 0) |
|
backbone, clip_voxels, blurry_image_enc_ = model.backbone(voxel_ridge) |
|
|
|
clip_voxels_norm = torch.nn.functional.normalize(clip_voxels, dim=-1) |
|
clip_target_norm = torch.nn.functional.normalize(clip_target, dim=-1) |
|
|
|
if clip_scale > 0: |
|
labels = torch.arange(len(clip_voxels_norm)).to(clip_voxels_norm.device) |
|
test_fwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_voxels_norm, clip_target_norm), labels, k=1).item() |
|
test_bwd_percent_correct += utils.topk(utils.batchwise_cosine_similarity(clip_target_norm, clip_voxels_norm), labels, k=1).item() |
|
print(test_fwd_percent_correct) |
|
print(test_bwd_percent_correct) |
|
logs.update({ |
|
f"subset_{i}_{j}_test/fwd_pct_correct": test_fwd_percent_correct / (test_i + 1), |
|
f"subset_{i}_{j}_test/bwd_pct_correct": test_bwd_percent_correct / (test_i + 1), |
|
}) |
|
|
|
print("--- Full Dataset Evaluation ---") |
|
for k, v in logs.items(): |
|
print(f"{k}: {v:.4f}") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
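# 2-AFC evaluation over MST pairs: each brain embedding should be closer to its
# own image than to its pairmate ("corresponding") and closer to its own image
# than to images from other pairs ("non-corresponding").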
|
def evaluate_mst_pairs(mst_pairs): |
|
with torch.no_grad(), torch.cuda.amp.autocast(dtype=data_type): |
|
failed_A = [] |
|
failed_B = [] |
|
failed_non_corr = [] |
|
|
|
|
|
all_indices = np.unique(mst_pairs.flatten()) |
|
|
|
|
|
        # NOTE: image_idx maps trial -> unique-image ID, while rows of `images`
        # and `vox` are per-trial; if these rows are meant to be the trials in
        # mst_pairs, plain `all_indices` may be the intended index here.
        all_images = images[image_idx[all_indices]].to(device)
        all_voxels = torch.Tensor(vox[image_idx[all_indices]]).unsqueeze(1).to(device)
|
|
|
|
|
all_clip_targets = clip_img_embedder(all_images.float()) |
|
all_clip_targets_norm = nn.functional.normalize(all_clip_targets.flatten(1), dim=-1) |
|
|
|
|
|
all_voxel_ridge = model.ridge(all_voxels, 0) |
|
_, all_clip_voxels, _ = model.backbone(all_voxel_ridge) |
|
all_clip_voxels_norm = nn.functional.normalize(all_clip_voxels.flatten(1), dim=-1) |
|
|
|
|
|
idx_to_pos = {idx: pos for pos, idx in enumerate(all_indices)} |
|
|
|
|
|
corr_score = 0 |
|
non_corr_score = 0 |
|
corr_total = len(mst_pairs) * 2 |
|
non_corr_total = len(mst_pairs) * (len(mst_pairs)-1) * 4 |
|
|
|
|
|
|
|
idxA = np.array([pair[0] for pair in mst_pairs]) |
|
idxB = np.array([pair[1] for pair in mst_pairs]) |
|
|
|
posA = np.array([idx_to_pos[idx] for idx in idxA]) |
|
posB = np.array([idx_to_pos[idx] for idx in idxB]) |
|
|
|
voxA_embeddings = all_clip_voxels_norm[posA] |
|
voxB_embeddings = all_clip_voxels_norm[posB] |
|
imgA_embeddings = all_clip_targets_norm[posA] |
|
imgB_embeddings = all_clip_targets_norm[posB] |
|
|
|
simA_A = utils.batchwise_cosine_similarity(voxA_embeddings, imgA_embeddings) |
|
simA_B = utils.batchwise_cosine_similarity(voxA_embeddings, imgB_embeddings) |
|
simB_B = utils.batchwise_cosine_similarity(voxB_embeddings, imgB_embeddings) |
|
simB_A = utils.batchwise_cosine_similarity(voxB_embeddings, imgA_embeddings) |
|
|
|
|
|
|
|
|
|
correct_A = torch.diag(simA_A) > torch.diag(simA_B) |
|
|
|
correct_B = torch.diag(simB_B) > torch.diag(simB_A) |
|
|
|
corr_score += correct_A.sum().item() |
|
corr_score += correct_B.sum().item() |
|
|
|
|
|
failed_A = [i for i, correct in enumerate(correct_A.cpu()) if not correct] |
|
failed_B = [i for i, correct in enumerate(correct_B.cpu()) if not correct] |
|
|
|
|
|
N = len(mst_pairs) |
|
|
|
row_idx = torch.arange(N).unsqueeze(1) |
|
col_idx = torch.arange(N).unsqueeze(0) |
|
off_diag_mask = row_idx != col_idx |
|
|
|
diagA_A = simA_A.diag().unsqueeze(1).expand(-1, N) |
|
diagB_B = simB_B.diag().unsqueeze(1).expand(-1, N) |
|
|
|
|
|
|
|
|
|
off_diag_mask_device = off_diag_mask.to(device) |
|
|
|
        # An off-diagonal (non-corresponding) comparison is correct when the
        # matching diagonal similarity beats it.
        correct_AA = (simA_A < diagA_A) & off_diag_mask_device
        correct_AB = (simA_B < diagA_A) & off_diag_mask_device
        correct_BB = (simB_B < diagB_B) & off_diag_mask_device
        correct_BA = (simB_A < diagB_B) & off_diag_mask_device

        non_corr_score += correct_AA.sum().item()
        non_corr_score += correct_AB.sum().item()
        non_corr_score += correct_BB.sum().item()
        non_corr_score += correct_BA.sum().item()

        # log the incorrect off-diagonal comparisons
        fail_sources = [~correct_AA & off_diag_mask_device,
                        ~correct_AB & off_diag_mask_device,
                        ~correct_BB & off_diag_mask_device,
                        ~correct_BA & off_diag_mask_device]
        for fail_matrix, label in zip(fail_sources, ["AA", "AB", "BB", "BA"]):
            fail_coords = torch.nonzero(fail_matrix, as_tuple=False).cpu().numpy()
            for i, j in fail_coords:
                failed_non_corr.append({"type": label, "i": i, "j": j, "pair_i": mst_pairs[i], "pair_j": mst_pairs[j]})
|
|
|
return corr_score, corr_total, int(non_corr_score), non_corr_total, failed_A, failed_B, failed_non_corr |
|
|
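# Minimal sketch of the 2-AFC rule on a toy similarity matrix (illustrative
# only; names and values below are hypothetical, not pipeline outputs):
#   sim = utils.batchwise_cosine_similarity(vox_emb, img_emb)  # (N, N)
#   correct = sim.diag().unsqueeze(1) > sim                    # diagonal should win its row
#   off_diag = ~torch.eye(len(sim), dtype=torch.bool, device=sim.device)
#   acc = (correct & off_diag).float().sum() / off_diag.sum()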
|
|
|
|
|
|
|
|
|
all_scores = [] |
|
all_failures = [] |
|
|
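# Score every combination of pairmate-A repeat i vs. pairmate-B repeat j
# (assumes four presentations of each pairmate across the concatenated sessions).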
|
for i in range(4): |
|
for j in range(4): |
|
mst_pairs = np.stack([pairs[:, 0, i], pairs[:, 1, j]], axis=1) |
|
corr_score, corr_total, non_corr_score, non_corr_total, failed_A, failed_B, failed_non_corr = evaluate_mst_pairs(mst_pairs) |
|
|
|
|
|
all_scores.append((corr_score, corr_total, non_corr_score, non_corr_total)) |
|
all_failures.append({ |
|
"repeat_A": i, |
|
"repeat_B": j, |
|
"failed_A": failed_A, |
|
"failed_B": failed_B, |
|
"failed_non_corr": failed_non_corr, |
|
"mst_pairs": mst_pairs, |
|
}) |
|
|
|
|
|
print(f"pairmate A repeat {i} vs pairmate B repeat {j}:") |
|
print(f"2-AFC corresponding = {corr_score}/{corr_total} ({corr_score/corr_total:.2%})") |
|
print(f"2-AFC non-corresponding = {non_corr_score}/{non_corr_total} ({non_corr_score/non_corr_total:.2%})") |
|
print("") |
|
|
|
|
|
|
|
|
|
|
|
all_scores = np.array(all_scores) |
|
print(f"average 2-AFC corresponding: {all_scores[:,0].mean():.2f}/{all_scores[:,1].mean():.2f} ({(all_scores[:,0].sum()/all_scores[:,1].sum()):.2%})") |
|
print(f"average 2-AFC non-corresponding: {all_scores[:,2].mean():.2f}/{all_scores[:,3].mean():.2f} ({(all_scores[:,2].sum()/all_scores[:,3].sum()):.2%})") |
|
print('chance = 50% per 2-AFC judgment')
|
|
|
|
|
|
|
|
|
|
|
from collections import defaultdict |
|
|
|
|
|
failed_images = defaultdict(list)

for failure_entry in all_failures:
    mst_pairs = failure_entry["mst_pairs"]
    i, j = failure_entry["repeat_A"], failure_entry["repeat_B"]

    # use a local name here: `image_idx` above is the global trial-index tensor
    for fail_idx in failure_entry["failed_A"]:
        failed_img = mst_pairs[fail_idx][0]
        pairmate_idx = mst_pairs[fail_idx][1]
        failed_images[failed_img].append({
            "repeat_A": i,
            "repeat_B": j,
            "pairmate": pairmate_idx,
            "type": "A",
        })

    for fail_idx in failure_entry["failed_B"]:
        failed_img = mst_pairs[fail_idx][1]
        pairmate_idx = mst_pairs[fail_idx][0]
        failed_images[failed_img].append({
            "repeat_A": i,
            "repeat_B": j,
            "pairmate": pairmate_idx,
            "type": "B",
        })
|
|
|
|
|
|
|
|
print(mst_pairs[:5])
|
|
|
|
|
|
|
|
|
|
|
print(pairs[0])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
ix = 0 |
|
display(utils.torch_to_Image(images[pairs[ix][0]])) |
|
display(utils.torch_to_Image(images[pairs[ix][1]])) |
|