Update app.py
app.py CHANGED
@@ -3,59 +3,106 @@ from ultralytics.utils.plotting import Annotator
Old version (lines 3-61; removed lines are marked "-", several of them truncated in this view):

 import numpy as np
 import cv2
 import gradio as gr
-import os
 import yolov9

 def Predict(img):
     objects_name = []
     cropped_images = []
-    img_name_list=[]
-    results = model(img, size=640)
-    annotator = Annotator(img, line_width=2, example=str('Organ'))

         xmin, ymin, xmax, ymax, confidence, class_id = result
-        label =
         confidence = float(confidence)

-        if label not in
             'box': [xmin, ymin, xmax, ymax],
             'confidence': confidence
         }

         xmin, ymin, xmax, ymax = data['box']
         confidence = data['confidence']

-        # Cropping the detected object
-        cropped_img =

-        annotator.box_label([xmin, ymin, xmax, ymax], f"{label} {confidence:.2f}", color=(255, 0, 0))

         # Convert the cropped image from BGR to RGB before saving
-        cropped_img_rgb = cv2.cvtColor(

         # Save the cropped image
         crop_filename = f"{label}.jpg"
         img_name_list.append(crop_filename)
         cv2.imwrite(crop_filename, cropped_img_rgb)

     labels = [{"label": label, "confidence": confidence} for label, confidence in objects_name]
     return annotated_img, cropped_images, objects_name
-    #return img_name_list,labels

 def output_display(img):
     annotated_img, cropped_images, objects_name = Predict(img)
New version (lines 3-108; added lines are marked "+"):

 import numpy as np
 import cv2
 import gradio as gr
 import yolov9

+# Load the first YOLOv9 model
+model1 = yolov9.load('Organ_detection.pt', device="cpu")
+model1.conf = 0.40
+model1.iou = 0.45
+
+# Load the second YOLO model (assuming a second YOLOv9 model or another YOLO model)
+model2 = yolov9.load('/content/update_best.pt', device="cpu")
+model2.conf = 0.40
+model2.iou = 0.45
+
+def remove_lines(img):
+    # Convert the image to grayscale
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+    # Apply edge detection
+    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
+
+    # Detect lines using the probabilistic Hough transform
+    lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=100, minLineLength=100, maxLineGap=10)
+
+    # Paint any detected lines over in white
+    if lines is not None:
+        for line in lines:
+            for x1, y1, x2, y2 in line:
+                cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
+
+    return img

 def Predict(img):
     objects_name = []
     cropped_images = []
+    img_name_list = []

+    # Keep an unannotated copy of the image for cropping
+    img_for_cropping = img.copy()

+    # Run inference with the first model
+    results1 = model1(img, size=224)
+    annotator1 = Annotator(img, line_width=2, example=str('Organ'))
+
+    # Keep only the highest-confidence detection per label
+    detections1 = {}
+    for result in results1.xyxy[0]:
+        xmin, ymin, xmax, ymax, confidence, class_id = result
+        label = results1.names[int(class_id)]
+        confidence = float(confidence)
+
+        if label not in detections1 or detections1[label]['confidence'] < confidence:
+            detections1[label] = {
+                'box': [xmin, ymin, xmax, ymax],
+                'confidence': confidence
+            }
+
+    # Run inference with the second model
+    results2 = model2(img, size=224)
+    annotator2 = Annotator(img, line_width=2, example=str('Organ'))
+
+    detections2 = {}
+    for result in results2.xyxy[0]:
         xmin, ymin, xmax, ymax, confidence, class_id = result
+        label = results2.names[int(class_id)]
         confidence = float(confidence)

+        if label not in detections2 or detections2[label]['confidence'] < confidence:
+            detections2[label] = {
                 'box': [xmin, ymin, xmax, ymax],
                 'confidence': confidence
             }

+    # Combine detections from both models
+    combined_detections = {**detections1, **detections2}
+
+    for label, data in combined_detections.items():
         xmin, ymin, xmax, ymax = data['box']
         confidence = data['confidence']

+        # Crop the detected object from the unannotated copy
+        cropped_img = img_for_cropping[int(ymin):int(ymax), int(xmin):int(xmax)]
+
+        # Remove lines from the cropped image
+        cropped_img_cleaned = remove_lines(cropped_img)

+        cropped_images.append((label, confidence, cropped_img_cleaned))

         # Convert the cropped image from BGR to RGB before saving
+        cropped_img_rgb = cv2.cvtColor(cropped_img_cleaned, cv2.COLOR_BGR2RGB)

         # Save the cropped image
         crop_filename = f"{label}.jpg"
         img_name_list.append(crop_filename)
         cv2.imwrite(crop_filename, cropped_img_rgb)

+        # Annotate the image after cropping, so the boxes do not appear in the crops
+        annotator1.box_label([xmin, ymin, xmax, ymax], f"{label} {confidence:.2f}", color=(255, 0, 0))
+
+    annotated_img = annotator1.result()
+    objects_name = [(label, data['confidence']) for label, data in combined_detections.items()]
     labels = [{"label": label, "confidence": confidence} for label, confidence in objects_name]
+
     return annotated_img, cropped_images, objects_name

 def output_display(img):
     annotated_img, cropped_images, objects_name = Predict(img)
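One behavior of the merge above is worth flagging: {**detections1, **detections2} lets model2's entry silently replace model1's whenever both models predict the same label, even when model1 was more confident. If the intent is to keep the higher-confidence box per label, a small confidence-aware merge does it; a minimal sketch (the helper name merge_detections is mine, not from this commit):

def merge_detections(*detection_dicts):
    # For each label, keep the detection with the highest confidence
    # across all models, instead of letting the last dict win.
    merged = {}
    for detections in detection_dicts:
        for label, data in detections.items():
            if label not in merged or merged[label]['confidence'] < data['confidence']:
                merged[label] = data
    return merged

# Drop-in replacement for the plain dict merge:
# combined_detections = merge_detections(detections1, detections2)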
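Two OpenCV details in the cropping loop are easy to trip over. NumPy slicing returns a view, and remove_lines draws on its argument in place, so the white lines painted over a crop also land in img_for_cropping itself (and in any later crop that overlaps the same region). Separately, cv2.imwrite interprets arrays as BGR, so converting a BGR crop to RGB right before imwrite saves it with swapped channels; note too that a Gradio image input usually arrives as RGB, not BGR. A defensive sketch, assuming the frames really are BGR:

# Crop a copy so remove_lines cannot mutate the source image ...
cropped_img = img_for_cropping[int(ymin):int(ymax), int(xmin):int(xmax)].copy()
cropped_img_cleaned = remove_lines(cropped_img)

# ... and write the BGR array directly; cv2.imwrite expects BGR,
# so no cv2.cvtColor(..., cv2.COLOR_BGR2RGB) is needed before saving.
cv2.imwrite(crop_filename, cropped_img_cleaned)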
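A quick way to smoke-test Predict outside the Space, assuming both weight files are on disk (the sample file name is mine):

img = cv2.imread("sample.jpg")           # OpenCV loads images as BGR
annotated, crops, names = Predict(img)
print(names)                             # list of (label, confidence) tuples
cv2.imwrite("annotated.jpg", annotated)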
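The hunk ends just inside output_display, so the Space's Gradio wiring is not visible in this diff. For orientation only, an interface for a function with this shape might look like the sketch below; the component choices are assumptions, not the Space's actual code:

# Hypothetical wiring -- the real interface code lies outside this hunk.
demo = gr.Interface(
    fn=output_display,
    inputs=gr.Image(type="numpy", label="Input image"),
    outputs=[
        gr.Image(label="Annotated image"),
        gr.Gallery(label="Cropped detections"),
        gr.JSON(label="Labels and confidences"),
    ],
)

if __name__ == "__main__":
    demo.launch()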