# Tumor_HENA / app.py
import torch
from PIL import Image
import torchvision.transforms as transforms
import gradio as gr
import torch.nn.functional as F
from torch import nn
import numpy as np
# Image preprocessing: resize, convert to tensor, normalize with ImageNet statistics
def preprocess_image(image):
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    return transform(image).unsqueeze(0)  # add a batch dimension: (1, C, H, W)
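# Example usage (hypothetical file name; assumes an RGB image on disk):
#   tensor = preprocess_image(Image.open("scan.jpg").convert("RGB"))
#   tensor.shape  # -> torch.Size([1, 3, 256, 256])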
def findConv2dOutShape(hin, win, conv, pool=2):
    # Standard Conv2d output-size formula, followed by optional 2x2 pooling
    kernel_size = conv.kernel_size
    stride = conv.stride
    padding = conv.padding
    dilation = conv.dilation
    hout = np.floor((hin + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) / stride[0] + 1)
    wout = np.floor((win + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) / stride[1] + 1)
    if pool:
        hout /= pool
        wout /= pool
    return int(hout), int(wout)
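# Sanity check of the formula for this model's input (values follow from the
# code above: kernel_size=3, stride=1, no padding, pool=2):
#   findConv2dOutShape(256, 256, nn.Conv2d(3, 8, kernel_size=3))   # -> (127, 127)
#   findConv2dOutShape(127, 127, nn.Conv2d(8, 16, kernel_size=3))  # -> (62, 62)
#   findConv2dOutShape(62, 62, nn.Conv2d(16, 32, kernel_size=3))   # -> (30, 30)
#   findConv2dOutShape(30, 30, nn.Conv2d(32, 64, kernel_size=3))   # -> (14, 14)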
# Define Architecture For CNN_TUMOR Model
class CNN_TUMOR(nn.Module):
    # Network Initialization
    def __init__(self, params):
        super(CNN_TUMOR, self).__init__()
        Cin, Hin, Win = params["shape_in"]
        init_f = params["initial_filters"]
        num_fc1 = params["num_fc1"]
        num_classes = params["num_classes"]
        self.dropout_rate = params["dropout_rate"]
        # Convolution Layers (each followed by 2x2 max pooling in forward)
        self.conv1 = nn.Conv2d(Cin, init_f, kernel_size=3)
        h, w = findConv2dOutShape(Hin, Win, self.conv1)
        self.conv2 = nn.Conv2d(init_f, 2 * init_f, kernel_size=3)
        h, w = findConv2dOutShape(h, w, self.conv2)
        self.conv3 = nn.Conv2d(2 * init_f, 4 * init_f, kernel_size=3)
        h, w = findConv2dOutShape(h, w, self.conv3)
        self.conv4 = nn.Conv2d(4 * init_f, 8 * init_f, kernel_size=3)
        h, w = findConv2dOutShape(h, w, self.conv4)
        # Compute the flattened feature size after the last conv/pool stage
        self.num_flatten = h * w * 8 * init_f
        self.fc1 = nn.Linear(self.num_flatten, num_fc1)
        self.fc2 = nn.Linear(num_fc1, num_classes)
    def forward(self, X):
        # Convolution & Pool Layers
        X = F.relu(self.conv1(X))
        X = F.max_pool2d(X, 2, 2)
        X = F.relu(self.conv2(X))
        X = F.max_pool2d(X, 2, 2)
        X = F.relu(self.conv3(X))
        X = F.max_pool2d(X, 2, 2)
        X = F.relu(self.conv4(X))
        X = F.max_pool2d(X, 2, 2)
        X = X.view(-1, self.num_flatten)
        X = F.relu(self.fc1(X))
        # Functional dropout must be told the module's mode; without
        # training=self.training it stays active even after model.eval()
        X = F.dropout(X, self.dropout_rate, training=self.training)
        X = self.fc2(X)
        return F.log_softmax(X, dim=1)
# Define the parameters for the model
params = {
    "shape_in": (3, 256, 256),
    "initial_filters": 8,
    "num_fc1": 100,
    "dropout_rate": 0.25,
    "num_classes": 2
}
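# With these parameters the conv/pool stack maps 256x256 down to 14x14 with
# 8 * initial_filters = 64 channels, so num_flatten = 14 * 14 * 64 = 12544.
# Quick architecture smoke test (a sketch; not needed by the app itself):
#   net = CNN_TUMOR(params)
#   net(torch.randn(1, 3, 256, 256)).shape  # -> torch.Size([1, 2])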
# Build the architecture, then load the checkpoint. As written, the checkpoint
# holds the entire pickled model, so torch.load returns a new model object
# (replacing the instance built here); the CNN_TUMOR class definition above
# must be in scope for unpickling to succeed.
model = CNN_TUMOR(params)
model = torch.load('Brain_Tumor_model.pt', map_location=torch.device('cpu'))
model.eval()
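# If the checkpoint were instead saved with torch.save(model.state_dict(), ...)
# (an assumption; the file format is not visible here), the weights would be
# loaded into the instance built above rather than replacing it:
#   model = CNN_TUMOR(params)
#   model.load_state_dict(torch.load('Brain_Tumor_model.pt', map_location='cpu'))
#   model.eval()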
# Function to make predictions
def predict(image):
    image_tensor = preprocess_image(image)
    with torch.no_grad():
        output = model(image_tensor)  # log-probabilities from log_softmax
        _, predicted = torch.max(output, 1)
        # Exponentiate the log-probabilities to recover probabilities, then
        # report the winning class's probability as a percentage
        confidence = torch.exp(output)[0][predicted.item()].item() * 100
    # Generate a message based on the prediction (class 1 = no tumor, class 0 = tumor)
    if predicted.item() == 1:
        return f"No Tumor Detected (Confidence: {confidence:.2f}%)"
    else:
        return f"Tumor Detected (Confidence: {confidence:.2f}%)"
# Create the Gradio interface
interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Brain Tumor Detection",
    description="<div style='font-size: 24px; color: blue;'>"
                "<strong>Upload a brain MRI image to detect if there is a tumor.</strong>"
                "</div><br>",
    article="<div style='font-size: 16px;'><strong><em>HENA FATMA</em></strong></div>"
)
# Launch the interface
interface.launch()