File size: 5,589 Bytes
0a8b79b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
#!/bin/bash
# SLURM batch script: fine-tune MindEyeV2 for sub-005 (ses-01-02, task C) on
# real-time-preprocessed data with a union mask. Submit with: sbatch <this file>.
# NOTE(review): a "slurms/" directory must exist in the submit directory before
# submitting, or the -e/-o log redirections below will fail.
#SBATCH --job-name=sub-005_ses-01-02_task-C_finetune_rtpreproc_unionmask
#SBATCH --ntasks-per-node=1
#SBATCH --nodes=1              
#SBATCH --gres=gpu:1
#SBATCH --constraint=gpu80
#SBATCH --gpus-per-task=1       # Set to equal gres=gpu:#!
#SBATCH --cpus-per-task=1      # 40 / 80 / 176 distributed across node
#SBATCH --time=02:35:00         # total run time limit (HH:MM:SS)
#SBATCH -e slurms/%A_%a.err     # first create a "slurms" folder in current directory to store logs
#SBATCH -o slurms/%A_%a.out
#SBATCH --no-requeue
#SBATCH --array=0               # 0 or 0-9
#SBATCH --mail-type=END
#SBATCH [email protected]

# Log job-array identity and execution host so output can be matched to logs.
echo "My SLURM_ARRAY_JOB_ID is ${SLURM_ARRAY_JOB_ID}"
echo "My SLURM_ARRAY_TASK_ID is ${SLURM_ARRAY_TASK_ID}"
echo "Executing on the machine: $(hostname)"

# Reset the module environment, load the toolchain, and activate the conda env.
# NOTE(review): FSL is loaded here — presumably required by the preprocessing
# code invoked downstream; confirm it is still needed.
module purge
module load anaconda3/2023.3
module load fsl/6.0.6.2
conda activate rt_mindEye2
# source /scratch/gpfs/ri4541/MindEyeV2/src/fmri/bin/activate

# Review and verify every setting in this section before submitting.
# ---
sub='sub-005'
session='all'
session_label='ses-01-02'
# MST train/test split; the alternative (split=orig) trains on non-repeated
# images and tests on images that repeat.
split='MST'
task='C'
func_task_name='C'
resample_voxel_size='False'
resample_post_glmsingle='False'
load_from_resampled_file='True'
remove_close_to_MST='False'
remove_random_n='False'
resampled_vox_size='2.0'
resample_method='trilinear'
# Voxel size with the decimal point rewritten as an underscore (e.g. 2.0 -> 2_0),
# so it can be embedded safely in file/model names.
vox_dim_str="${resampled_vox_size//./_}"

# Active model name; earlier naming schemes are kept below for reference.
# model_name="${sub}_multi_task-${task}_bs24_MST_rishab_${split}split"
model_name="${sub}_${session}_task-${task}_bs24_MST_rishab_${split}split_finetune_rtpreproc_unionmask"
# model_name="${sub}_${session}_task-${task}_bs24_MST_rishab_${split}split_resampled_${vox_dim_str}mm_${resample_method}_seed${SLURM_ARRAY_TASK_ID}"

# Notebook (without extension) that is converted and launched below.
main_script='main-finetune-rt-preproc'
# glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle-multi"
glmsingle_path="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/glmsingle_${sub}_${session_label}_task-${task}"
# --- 
# --- 

export NUM_GPUS=1  # Set to equal gres=gpu:#!
export BATCH_SIZE=24
# Effective batch size across all processes (per-GPU batch * GPU count).
export GLOBAL_BATCH_SIZE=$((BATCH_SIZE * NUM_GPUS))

# Make sure another job doesn't use the same port; pick a random port in
# [11000, 19000]. (FIX: this sentence was previously a bare line missing its
# leading '#', so bash tried to execute "Make ..." and errored on every run.)
export MASTER_PORT=$((RANDOM % (19000 - 11000 + 1) + 11000)) 
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export COUNT_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)
echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo WORLD_SIZE=${COUNT_NODE}

echo model_name=${model_name}

# Directory where evaluation artifacts for this model are collected.
eval_dir="/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}"

# Expose the experiment configuration to the launched python scripts via the
# environment (they read these uppercase variables rather than taking flags).
export SUB="${sub}"
export SESSION="${session}"
export SESSION_LABEL="${session_label}"
export SPLIT="${split}"
export TASK="${task}"
export FUNC_TASK_NAME="${func_task_name}"
export RESAMPLE_VOXEL_SIZE="${resample_voxel_size}"
export RESAMPLE_POST_GLMSINGLE="${resample_post_glmsingle}"
export LOAD_FROM_RESAMPLED_FILE="${load_from_resampled_file}"
export REMOVE_CLOSE_TO_MST="${remove_close_to_MST}"
export REMOVE_RANDOM_N="${remove_random_n}"
export RESAMPLED_VOX_SIZE="${resampled_vox_size}"
export RESAMPLE_METHOD="${resample_method}"

export glmsingle_path="${glmsingle_path}"
export eval_dir="${eval_dir}"
# Compute nodes are offline; wandb logs locally and must be synced afterwards.
export WANDB_MODE="offline"

# singlesubject finetuning
# Convert the training notebook to a plain script, then launch it under
# accelerate with the distributed settings computed above.
# FIX: the launch command previously ended with a dangling "&& \" left over
# from the now-commented-out pipeline steps below; it line-continued across
# the comments and made the final reminder echo conditional on training
# success. The trailing "&& \" has been removed.
jupyter nbconvert "${main_script}.ipynb" --to python && \
accelerate launch --num_processes=$(($NUM_GPUS * $COUNT_NODE)) --num_machines=$COUNT_NODE --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT "${main_script}.py" --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --model_name=${model_name} --no-multi_subject --subj=1 --batch_size=${BATCH_SIZE} --max_lr=3e-4 --mixup_pct=.33 --num_epochs=150 --use_prior --prior_scale=30 --clip_scale=1 --no-blurry_recon --blur_scale=.5 --no-use_image_aug --n_blocks=4 --hidden_dim=1024 --num_sessions=40 --ckpt_interval=999 --ckpt_saving --wandb_log --multisubject_ckpt=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/train_logs/multisubject_subj01_1024hid_nolow_300ep --seed="${SLURM_ARRAY_TASK_ID}"

# Subsequent pipeline steps, currently disabled; kept for reference.
# jupyter nbconvert recon_inference-multisession.ipynb --to python && \
# python recon_inference-multisession.py --model_name=${model_name} --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4 --glmsingle_path="${glmsingle_path}" && \

# #jupyter nbconvert recon_inference_orig.ipynb --to python && \
# #python recon_inference_orig.py --model_name=${model_name} --subj=1 --no-blurry_recon --use_prior --hidden_dim=1024 --n_blocks=4 && \ 

# jupyter nbconvert enhanced_recon_inference.ipynb --to python && \
# python enhanced_recon_inference.py --model_name=${model_name} --all_recons_path=${eval_dir}/${model_name}_all_recons.pt && \

# #jupyter nbconvert enhanced_recon_inference_orig.ipynb --to python && \
# #python enhanced_recon_inference_orig.py --model_name=${model_name} && \

# jupyter nbconvert final_evaluations.ipynb --to python && \
# python final_evaluations.py --model_name=${model_name} --all_recons_path=${eval_dir}/all_enhancedrecons.pt --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --eval_dir=${eval_dir} && \

#jupyter nbconvert final_evaluations_orig.ipynb --to python && \
#python final_evaluations_orig.py --model_name=${model_name} --all_recons_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}/${model_name}_all_recons.pt --data_path=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2 --eval_dir=/scratch/gpfs/ri4541/MindEyeV2/src/mindeyev2/evals/${model_name}

# Always remind the user, regardless of whether training succeeded.
echo "Remember to sync wandb logs with online node!"