sancho10 committed on
Commit 6cbe3c4 · verified · 1 Parent(s): e8ce9de

Upload 5 files

Files changed (5)
  1. Brain_Tumor_model.pt +3 -0
  2. README.md +5 -5
  3. app.py +124 -0
  4. gitattributes +35 -0
  5. requirements.txt +7 -0
Brain_Tumor_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c167c5800f7d5db8d02760faad44a6362f8005664238c006b19c48d35e356568
+ size 5123085
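The three lines above are a Git LFS pointer, not the weights themselves: the roughly 5 MB file is stored on the LFS server and materialized on checkout. To fetch it programmatically, a sketch along these lines should work; the repo_id is an assumption, since the full Space path is not shown in this commit:

from huggingface_hub import hf_hub_download

# repo_id is hypothetical -- substitute the actual Space path
model_path = hf_hub_download(
    repo_id="sancho10/brain-tumor-detection",
    filename="Brain_Tumor_model.pt",
    repo_type="space",
)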
README.md CHANGED
@@ -1,14 +1,14 @@
  ---
- title: Tumor HENA
- emoji: 📊
  colorFrom: indigo
- colorTo: green
  sdk: gradio
- sdk_version: 5.4.0
  app_file: app.py
  pinned: false
  license: mit
- short_description: brain tumor detention
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

  ---
+ title: Brain Tumor Detection
+ emoji: 🌍
  colorFrom: indigo
+ colorTo: purple
  sdk: gradio
+ sdk_version: 5.1.0
  app_file: app.py
  pinned: false
  license: mit
+ short_description: Brain Tumor Detection by CNN (PyTorch).
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
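Because the diff interleaves removed and added lines, the front matter that results from this commit is, for reference:

---
title: Brain Tumor Detection
emoji: 🌍
colorFrom: indigo
colorTo: purple
sdk: gradio
sdk_version: 5.1.0
app_file: app.py
pinned: false
license: mit
short_description: Brain Tumor Detection by CNN (PyTorch).
---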
app.py ADDED
@@ -0,0 +1,124 @@
+ import torch
+ from PIL import Image
+ import torchvision.transforms as transforms
+ import gradio as gr
+ import torch.nn.functional as F
+ from torch import nn
+ import numpy as np
+
+
+ # Preprocessing: resize to the model's input resolution, convert to a
+ # tensor, and normalize with ImageNet statistics
+ def preprocess_image(image):
+     transform = transforms.Compose([
+         transforms.Resize((256, 256)),
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+     ])
+     return transform(image).unsqueeze(0)
+
+ def findConv2dOutShape(hin, win, conv, pool=2):
+     # Read the conv layer's geometry
+     kernel_size = conv.kernel_size
+     stride = conv.stride
+     padding = conv.padding
+     dilation = conv.dilation
+
+     # Standard Conv2d output-size formula
+     hout = np.floor((hin + 2*padding[0] - dilation[0]*(kernel_size[0]-1) - 1) / stride[0] + 1)
+     wout = np.floor((win + 2*padding[1] - dilation[1]*(kernel_size[1]-1) - 1) / stride[1] + 1)
+
+     if pool:
+         hout /= pool
+         wout /= pool
+     return int(hout), int(wout)
+
+ # Define the architecture for the CNN_TUMOR model
+ class CNN_TUMOR(nn.Module):
+
+     # Network initialization
+     def __init__(self, params):
+         super(CNN_TUMOR, self).__init__()
+
+         Cin, Hin, Win = params["shape_in"]
+         init_f = params["initial_filters"]
+         num_fc1 = params["num_fc1"]
+         num_classes = params["num_classes"]
+         self.dropout_rate = params["dropout_rate"]
+
+         # Convolution layers, doubling the filter count at each stage
+         self.conv1 = nn.Conv2d(Cin, init_f, kernel_size=3)
+         h, w = findConv2dOutShape(Hin, Win, self.conv1)
+         self.conv2 = nn.Conv2d(init_f, 2*init_f, kernel_size=3)
+         h, w = findConv2dOutShape(h, w, self.conv2)
+         self.conv3 = nn.Conv2d(2*init_f, 4*init_f, kernel_size=3)
+         h, w = findConv2dOutShape(h, w, self.conv3)
+         self.conv4 = nn.Conv2d(4*init_f, 8*init_f, kernel_size=3)
+         h, w = findConv2dOutShape(h, w, self.conv4)
+
+         # Compute the flattened feature size feeding the classifier head
+         self.num_flatten = h * w * 8 * init_f
+         self.fc1 = nn.Linear(self.num_flatten, num_fc1)
+         self.fc2 = nn.Linear(num_fc1, num_classes)
+
+     def forward(self, X):
+         # Convolution & pool layers
+         X = F.relu(self.conv1(X))
+         X = F.max_pool2d(X, 2, 2)
+         X = F.relu(self.conv2(X))
+         X = F.max_pool2d(X, 2, 2)
+         X = F.relu(self.conv3(X))
+         X = F.max_pool2d(X, 2, 2)
+         X = F.relu(self.conv4(X))
+         X = F.max_pool2d(X, 2, 2)
+         X = X.view(-1, self.num_flatten)
+         X = F.relu(self.fc1(X))
+         # Pass training=self.training so dropout is disabled in eval mode
+         X = F.dropout(X, self.dropout_rate, training=self.training)
+         X = self.fc2(X)
+         return F.log_softmax(X, dim=1)
+
+ # Model hyperparameters
+ params = {
+     "shape_in": (3, 256, 256),
+     "initial_filters": 8,
+     "num_fc1": 100,
+     "dropout_rate": 0.25,
+     "num_classes": 2
+ }
+
+ # The checkpoint stores the entire module rather than a state_dict, so
+ # torch.load returns a ready CNN_TUMOR instance (the class definition above
+ # must be in scope for unpickling); the fresh instance built from params is
+ # overwritten by the load.
+ model = CNN_TUMOR(params)
+ model = torch.load('Brain_Tumor_model.pt', map_location=torch.device('cpu'))
+ model.eval()
+
+ # Run inference on a single image and report the predicted class
+ def predict(image):
+     image_tensor = preprocess_image(image)
+     with torch.no_grad():
+         output = model(image_tensor)
+         _, predicted = torch.max(output, 1)
+
+     # Confidence as a percentage; softmax of the log-probabilities
+     # recovers the class probabilities
+     confidence = F.softmax(output, dim=1)[0][predicted.item()].item() * 100
+
+     # Class 1 = no tumor, class 0 = tumor
+     if predicted.item() == 1:
+         return f"No Tumor Detected (Confidence: {confidence:.2f}%)"
+     else:
+         return f"Tumor Detected (Confidence: {confidence:.2f}%)"
+
+ # Create the Gradio interface
+ interface = gr.Interface(
+     fn=predict,
+     inputs=gr.Image(type="pil"),
+     outputs="text",
+     title="Brain Tumor Detection",
+     description="<div style='font-size: 24px; color: blue;'>"
+                 "<strong>Upload a brain MRI image to detect if there is a tumor.</strong>"
+                 "</div><br>",
+     article="<div style='font-size: 16px;'><em><strong>Sattorov Abdullah</strong></em></div>"
+ )
+
+ # Launch the interface
+ interface.launch()
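As a sanity check on the architecture above: each 3x3 convolution (stride 1, no padding) trims two pixels from each spatial side, and each 2x2 max-pool halves the result, which is exactly what findConv2dOutShape computes for these layers. A minimal sketch of the arithmetic for the 256x256 input declared in params:

# Trace the spatial size through conv1..conv4, each followed by a 2x2 max-pool
h = w = 256                      # spatial dims from params["shape_in"]
for _ in range(4):
    h, w = (h - 2) // 2, (w - 2) // 2
# 256 -> 127 -> 62 -> 30 -> 14
num_flatten = h * w * 8 * 8      # 8 * initial_filters = 64 channels after conv4
assert num_flatten == 12544      # the in_features of fc1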
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
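Of these patterns, the *.pt rule is the one relevant to this commit: it is why Brain_Tumor_model.pt above is stored as a Git LFS pointer rather than the raw 5 MB weights file.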
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ torch
+ torchvision
+ gradio
+ numpy
+ Pillow
+ matplotlib
+
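None of these dependencies are pinned, and matplotlib is listed even though app.py never imports it. If reproducible builds matter, one option (a sketch, not part of this commit) is to pin at least Gradio to the sdk_version declared in README.md:

gradio==5.1.0    # matches sdk_version in the README front matter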