AndaiMD committed
Commit 77c66f1 · verified · 1 Parent(s): 5c1ebee

Upload folder using huggingface_hub
hf_wrapper/__pycache__/model.cpython-313.pyc ADDED
Binary file (6.25 kB)
hf_wrapper/brain_unet_hf/config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "architectures": [
+     "UNetTransformerModel"
+   ],
+   "image_size": 256,
+   "in_channels": 1,
+   "model_type": "unet",
+   "out_channels": 3,
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.2"
+ }
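
These fields mirror the UNetConfig defined in hf_wrapper/model.py, plus the metadata that save_pretrained records automatically (architectures, torch_dtype, transformers_version). A minimal sketch of reading the config back, assuming hf_wrapper/model.py is on the import path:

    from model import UNetConfig

    config = UNetConfig.from_pretrained("brain_unet_hf")
    # The custom fields round-trip through config.json
    print(config.in_channels, config.out_channels, config.image_size)  # 1 3 256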
hf_wrapper/brain_unet_hf/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d55b7f5fa42bb65e4c951a034f5ea7cf10a22e8776eee9b1e46c65d872db921
+ size 124231028
hf_wrapper/convert_save.py ADDED
@@ -0,0 +1,15 @@
+ import torch
+ from model import UNetTransformerModel, UNetConfig
+
+ # Build the config and the HF-compatible wrapper model
+ config = UNetConfig(in_channels=1, out_channels=3, image_size=256)
+ model = UNetTransformerModel(config)
+
+ # Load the existing UNet weights into the wrapped model
+ model.model.load_state_dict(torch.load("unet_epoch20.pth", map_location="cpu"))
+
+ # Save the model and config in HF-compatible format
+ model.save_pretrained("brain_unet_hf")
+ config.save_pretrained("brain_unet_hf")
+
+ print("✅ Saved to brain_unet_hf/")
hf_wrapper/model.py ADDED
@@ -0,0 +1,119 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PreTrainedModel, PretrainedConfig
+
+ class DoubleConv(nn.Module):
+     """
+     The core building block of the U-Net architecture:
+     two consecutive convolutional layers, each followed by
+     batch normalization and ReLU activation.
+     """
+     def __init__(self, in_channels, out_channels):
+         super().__init__()
+         """
+         nn.Conv2d:
+             Applies a 2D convolution (3×3 kernel);
+             padding=1 keeps the output spatial size unchanged.
+             The first conv maps in_channels → out_channels;
+             the second keeps it at out_channels.
+
+         nn.BatchNorm2d:
+             Normalizes activations across the batch, per channel.
+             Helps stabilize and speed up training and
+             reduces internal covariate shift.
+         """
+         self.double_conv = nn.Sequential(
+             nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
+             nn.BatchNorm2d(out_channels),
+             nn.ReLU(inplace=True),
+             nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
+             nn.BatchNorm2d(out_channels),
+             nn.ReLU(inplace=True)
+         )
+
+     def forward(self, x):
+         return self.double_conv(x)
+
+ class UNet(nn.Module):
+     def __init__(self, in_channels=1, out_channels=3):
+         super().__init__()
+
+         # Encoder
+         self.down1 = DoubleConv(in_channels, 64)
+         self.pool1 = nn.MaxPool2d(2)
+
+         self.down2 = DoubleConv(64, 128)
+         self.pool2 = nn.MaxPool2d(2)
+
+         self.down3 = DoubleConv(128, 256)
+         self.pool3 = nn.MaxPool2d(2)
+
+         self.down4 = DoubleConv(256, 512)
+         self.pool4 = nn.MaxPool2d(2)
+
+         # Bottleneck
+         self.bottleneck = DoubleConv(512, 1024)
+
+         # Decoder
+         self.up4 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
+         self.dec4 = DoubleConv(1024, 512)
+
+         self.up3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
+         self.dec3 = DoubleConv(512, 256)
+
+         self.up2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
+         self.dec2 = DoubleConv(256, 128)
+
+         self.up1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
+         self.dec1 = DoubleConv(128, 64)
+
+         # Final output layer
+         self.out_conv = nn.Conv2d(64, out_channels, kernel_size=1)
+
+     def forward(self, x):
+         # Encoder
+         d1 = self.down1(x)
+         d2 = self.down2(self.pool1(d1))
+         d3 = self.down3(self.pool2(d2))
+         d4 = self.down4(self.pool3(d3))
+
+         # Bottleneck
+         bn = self.bottleneck(self.pool4(d4))
+
+         # Decoder: upsample, then concatenate the matching encoder map (skip connection)
+         up4 = self.up4(bn)
+         dec4 = self.dec4(torch.cat([up4, d4], dim=1))
+
+         up3 = self.up3(dec4)
+         dec3 = self.dec3(torch.cat([up3, d3], dim=1))
+
+         up2 = self.up2(dec3)
+         dec2 = self.dec2(torch.cat([up2, d2], dim=1))
+
+         up1 = self.up1(dec2)
+         dec1 = self.dec1(torch.cat([up1, d1], dim=1))
+
+         # Output
+         return self.out_conv(dec1)
+
+ # --- Config ---
+ class UNetConfig(PretrainedConfig):
+     model_type = "unet"
+
+     def __init__(self, in_channels=1, out_channels=3, image_size=256, **kwargs):
+         super().__init__(**kwargs)
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.image_size = image_size
+
+ # --- Transformer-compatible Model ---
+ class UNetTransformerModel(PreTrainedModel):
+     config_class = UNetConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = UNet(config.in_channels, config.out_channels)
+
+     def forward(self, pixel_values):
+         return self.model(pixel_values)
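
The four pooling stages mean input height and width must be divisible by 16, and the transposed convolutions restore the original resolution, so output spatial size matches the input. A quick smoke-test sketch of the wrapper, using a hypothetical random input and untrained weights:

    import torch
    from model import UNetConfig, UNetTransformerModel

    config = UNetConfig(in_channels=1, out_channels=3, image_size=256)
    model = UNetTransformerModel(config).eval()

    # 256 is divisible by 16, so the skip-connection shapes line up
    x = torch.randn(1, 1, 256, 256)
    with torch.no_grad():
        out = model(pixel_values=x)
    print(out.shape)  # torch.Size([1, 3, 256, 256])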
hf_wrapper/unet_epoch20.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:179e3e327b9ed30ad9a869d97088bda713d9de51ffd2fe4f7f1bccbbb439607e
+ size 124262517
unet.py CHANGED
@@ -1,6 +1,7 @@
  import torch
  import torch.nn as nn
  import torch.nn.functional as F
+ from transformers import PreTrainedModel, PretrainedConfig

  class DoubleConv(nn.Module):
      """
@@ -94,4 +95,25 @@ class UNet(nn.Module):
          dec1 = self.dec1(torch.cat([up1, d1], dim=1))

          # Output
-         return self.out_conv(dec1)
+         return self.out_conv(dec1)
+
+ # --- Config ---
+ class UNetConfig(PretrainedConfig):
+     model_type = "unet"
+
+     def __init__(self, in_channels=1, out_channels=3, image_size=256, **kwargs):
+         super().__init__(**kwargs)
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.image_size = image_size
+
+ # --- Transformer-compatible Model ---
+ class UNetTransformerModel(PreTrainedModel):
+     config_class = UNetConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = UNet(config.in_channels, config.out_channels)
+
+     def forward(self, pixel_values):
+         return self.model(pixel_values)
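
Because config.json records model_type "unet", the custom classes can also be wired into the transformers Auto loaders. This is not part of the commit, just a sketch assuming the classes are imported from unet.py and the saved directory path is as produced by convert_save.py:

    from transformers import AutoConfig, AutoModel
    from unet import UNetConfig, UNetTransformerModel

    # Map model_type "unet" onto the custom config/model classes
    AutoConfig.register("unet", UNetConfig)
    AutoModel.register(UNetConfig, UNetTransformerModel)

    model = AutoModel.from_pretrained("hf_wrapper/brain_unet_hf")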