# checkpoints_1024 / MindEye2.py
import torch
import torch.nn as nn
from functools import partial
class MindEyeModule(nn.Module):
    # Passthrough container: forward is the identity; submodules (e.g. a ridge
    # mapper and a backbone) are attached to this module as attributes.
    def __init__(self):
        super(MindEyeModule, self).__init__()

    def forward(self, x):
        return x
class RidgeRegression(torch.nn.Module):
    # Per-subject linear maps into a shared feature space. The ridge penalty is
    # supplied externally: make sure to add weight_decay when initializing the optimizer.
    def __init__(self, input_sizes, out_features, seq_len=1):
        super(RidgeRegression, self).__init__()
        self.seq_len = seq_len
        self.out_features = out_features
        # One linear layer per subject, since input sizes differ across subjects
        self.linears = torch.nn.ModuleList([
            torch.nn.Linear(input_size, out_features) for input_size in input_sizes
        ])

    def forward(self, x, subj_idx=0):
        # x: (batch, seq_len, input_sizes[subj_idx]) -> (batch, seq_len, out_features)
        out = torch.cat([self.linears[subj_idx](x[:, seq]).unsqueeze(1)
                         for seq in range(self.seq_len)], dim=1)
        return out
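
# Minimal usage sketch (sizes below are illustrative assumptions, not values taken
# from this checkpoint): two subjects with different voxel counts are mapped into a
# shared 4096-dim space, and the optimizer's weight_decay supplies the ridge penalty.
#   ridge = RidgeRegression(input_sizes=[15724, 14278], out_features=4096, seq_len=1)
#   opt = torch.optim.AdamW(ridge.parameters(), weight_decay=1e-2)
#   voxels = torch.randn(4, 1, 14278)   # (batch, seq_len, num_voxels) for subject 1
#   feats = ridge(voxels, subj_idx=1)   # -> (4, 1, 4096)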
class BrainNetwork(nn.Module):
    def __init__(self, h=4096, in_dim=15724, out_dim=768, seq_len=1, n_blocks=4, drop=.15,
                 clip_size=768, clip_scale=1):
        super().__init__()
        self.seq_len = seq_len
        self.h = h
        self.clip_size = clip_size
        self.clip_scale = clip_scale
        # MLP-Mixer-style residual blocks: blocks1 mix along the hidden dim h,
        # blocks2 mix along the sequence dim (after a permute in forward)
        self.mixer_blocks1 = nn.ModuleList([
            self.mixer_block1(h, drop) for _ in range(n_blocks)
        ])
        self.mixer_blocks2 = nn.ModuleList([
            self.mixer_block2(seq_len, drop) for _ in range(n_blocks)
        ])
        # Output linear layer
        self.backbone_linear = nn.Linear(h * seq_len, out_dim, bias=True)
        if self.clip_scale > 0:
            self.clip_proj = self.projector(clip_size, clip_size, h=clip_size)
    def projector(self, in_dim, out_dim, h=2048):
        return nn.Sequential(
            nn.LayerNorm(in_dim),
            nn.GELU(),
            nn.Linear(in_dim, h),
            nn.LayerNorm(h),
            nn.GELU(),
            nn.Linear(h, h),
            nn.LayerNorm(h),
            nn.GELU(),
            nn.Linear(h, out_dim)
        )
    def mlp(self, in_dim, out_dim, drop):
        return nn.Sequential(
            nn.Linear(in_dim, out_dim),
            nn.GELU(),
            nn.Dropout(drop),
            nn.Linear(out_dim, out_dim),
        )
    def mixer_block1(self, h, drop):
        # Mixes along the hidden dim h, applied independently at each sequence position
        return nn.Sequential(
            nn.LayerNorm(h),
            self.mlp(h, h, drop),
        )

    def mixer_block2(self, seq_len, drop):
        # Mixes along the sequence dim; forward permutes to (batch, h, seq_len) first
        return nn.Sequential(
            nn.LayerNorm(seq_len),
            self.mlp(seq_len, seq_len, drop)
        )
    def forward(self, x):
        # x: (batch, seq_len, h)
        # Placeholder tensors so the return signature stays fixed when heads are disabled
        c, b = torch.Tensor([0.]), torch.Tensor([[0.], [0.]])

        # Mixer blocks with residual connections in both orientations
        residual1 = x
        residual2 = x.permute(0, 2, 1)
        for block1, block2 in zip(self.mixer_blocks1, self.mixer_blocks2):
            x = block1(x) + residual1       # mix along h
            residual1 = x
            x = x.permute(0, 2, 1)
            x = block2(x) + residual2       # mix along seq_len
            residual2 = x
            x = x.permute(0, 2, 1)

        x = x.reshape(x.size(0), -1)        # flatten to (batch, h * seq_len)
        backbone = self.backbone_linear(x).reshape(len(x), -1, self.clip_size)
        if self.clip_scale > 0:
            c = self.clip_proj(backbone)    # projection into CLIP space
        return backbone, c, b
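
# Minimal end-to-end sketch (batch size, voxel count, and hyperparameters below are
# illustrative assumptions, not values read from this checkpoint): the ridge mapper
# projects subject voxels into the shared hidden space, and BrainNetwork returns the
# backbone embedding, its CLIP-space projection c, and the placeholder tensor b.
if __name__ == "__main__":
    model = MindEyeModule()
    model.ridge = RidgeRegression(input_sizes=[15724], out_features=4096, seq_len=1)
    model.backbone = BrainNetwork(h=4096, out_dim=768, seq_len=1, n_blocks=4,
                                  clip_size=768, clip_scale=1)

    voxels = torch.randn(2, 1, 15724)             # (batch, seq_len, num_voxels)
    feats = model.ridge(voxels, subj_idx=0)       # -> (2, 1, 4096)
    backbone, c, b = model.backbone(feats)        # -> (2, 1, 768), (2, 1, 768), placeholder
    print(backbone.shape, c.shape)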