Updated Cuda again
- Model_Class.py +5 -5
- Model_Seg.py +1 -2
Model_Class.py
CHANGED
@@ -67,25 +67,25 @@ model.eval()
 
 def load_and_classify_image(image_path, device):
 
-
+    gpu_model = model.to(device)
     image = val_transforms_416x628(image_path)
     image = image.unsqueeze(0).to(device)
 
     with torch.no_grad():
-        prediction =
+        prediction = gpu_model(image)
     prediction = torch.nn.functional.softmax(prediction, dim=1).squeeze(0)
     return prediction.to('cpu'), image.to('cpu')
 
 
 def make_GradCAM(image, device):
 
-
+    gpu_model = model.to(device)
     image = image.to(device)
     model.eval()
-    target_layers = [
+    target_layers = [gpu_model.gpu_model.layer4[-1]]
 
     arr = image.numpy().squeeze()
-    cam = GradCAM(model=
+    cam = GradCAM(model=gpu_model, target_layers=target_layers)
     targets = None
     grayscale_cam = cam(
         input_tensor=image,
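For context, a minimal, self-contained sketch of the pattern this commit applies in Model_Class.py: the module-level model is moved onto the requested device inside each helper, inference runs under torch.no_grad(), Grad-CAM is built on the device-resident model, and results come back on the CPU. The resnet18 backbone, the preprocess transform and the simplified helper names below are stand-ins for illustration, not the Space's actual model, weights or val_transforms_416x628.

import torch
import torchvision
from pytorch_grad_cam import GradCAM

# Stand-in for the Space's module-level classifier (assumption: the real
# Model_Class.py loads its own fine-tuned weights here).
model = torchvision.models.resnet18(weights=None)
model.eval()

# Stand-in for val_transforms_416x628: resize to the expected input size
# and convert a PIL image to a tensor.
preprocess = torchvision.transforms.Compose([
    torchvision.transforms.Resize((416, 628)),
    torchvision.transforms.ToTensor(),
])

def classify_image(pil_image, device):
    # Same pattern as the diff: move the global model to the target device
    # inside the helper, so CPU-only and CUDA runtimes both work.
    gpu_model = model.to(device)
    image = preprocess(pil_image).unsqueeze(0).to(device)
    with torch.no_grad():
        logits = gpu_model(image)
    prediction = torch.nn.functional.softmax(logits, dim=1).squeeze(0)
    # Hand results back on the CPU so callers never hold CUDA tensors.
    return prediction.to('cpu'), image.to('cpu')

def make_gradcam(image, device):
    # Grad-CAM needs gradients, so no torch.no_grad() here.
    gpu_model = model.to(device)
    gpu_model.eval()
    # The diff targets gpu_model.gpu_model.layer4[-1]; this stand-in has no
    # wrapper module, so the backbone's last residual block is used directly.
    target_layers = [gpu_model.layer4[-1]]
    cam = GradCAM(model=gpu_model, target_layers=target_layers)
    grayscale_cam = cam(input_tensor=image.to(device), targets=None)  # (1, H, W)
    return grayscale_cam[0]

A caller would pick the device once, e.g. device = torch.device('cuda' if torch.cuda.is_available() else 'cpu'), and pass it to both helpers.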
Model_Seg.py
CHANGED
@@ -73,14 +73,13 @@ post_transforms = Compose([
 
 def load_and_segment_image(input_image_path, device):
 
-    model = model.to(device)
     image_tensor = pre_transforms(input_image_path)
     image_tensor = image_tensor.unsqueeze(0).to(device)
 
     # Inference using SlidingWindowInferer
     inferer = SlidingWindowInferer(roi_size=(512, 512), sw_batch_size=16, overlap=0.75)
     with torch.no_grad():
-        outputs = inferer(image_tensor, model)
+        outputs = inferer(image_tensor, model.to(device))
 
 
     outputs = outputs.squeeze(0)
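Similarly, a minimal sketch of the Model_Seg.py side with the same call-time model.to(device) that the commit introduces. The MONAI UNet configuration and the tensor-based input are assumptions for illustration, not the Space's actual network or pre_transforms.

import torch
from monai.inferers import SlidingWindowInferer
from monai.networks.nets import UNet

# Stand-in for the Space's module-level segmentation network (assumption:
# the real Model_Seg.py loads its own trained weights here).
model = UNet(
    spatial_dims=2,
    in_channels=1,
    out_channels=2,
    channels=(16, 32, 64, 128),
    strides=(2, 2, 2),
)
model.eval()

def segment_image(image_tensor, device):
    # image_tensor: a pre-processed (C, H, W) image; add a batch dim and move it.
    image_tensor = image_tensor.unsqueeze(0).to(device)
    # Tile the image into 512x512 windows, 16 windows per forward pass, 75% overlap.
    inferer = SlidingWindowInferer(roi_size=(512, 512), sw_batch_size=16, overlap=0.75)
    with torch.no_grad():
        # Same fix as the diff: moving the model here keeps the network and
        # the sliding-window patches on the same device.
        outputs = inferer(image_tensor, model.to(device))
    return outputs.squeeze(0).to('cpu')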