Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -6,21 +6,70 @@ import gradio as gr
|
|
6 |
import os
|
7 |
import yolov9
|
8 |
|
9 |
# Load the YOLOv9 organ-detection weights for CPU inference.
model=yolov9.load('Organ_detection.pt',device="cpu")
model.conf = 0.40  # confidence threshold: detections below 0.40 are dropped
model.iou = 0.45   # IoU threshold used by non-max suppression
|
def Predict(img):
    """Run the organ-detection model on *img* and return the rendered frame.

    Parameters
    ----------
    img : image array accepted by the yolov9 model (e.g. an RGB ndarray).

    Returns
    -------
    The first rendered output image with detection boxes drawn on it.
    """
    # Removed `objects_name`/`cropped_images`: they were created but never
    # used or returned by this version of the function.
    results = model(img, size=640)  # inference at 640px input size
    output = results.render()       # list of annotated images
    return output[0]
|
22 |
|
23 |
-
|
24 |
-
|
25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
import os
|
7 |
import yolov9
|
8 |
|
# Load the YOLOv9 organ-detection weights for CPU inference.
model = yolov9.load('Organ_detection.pt', device="cpu")
model.conf = 0.40  # confidence threshold: detections below 0.40 are dropped
model.iou = 0.45   # IoU threshold used by non-max suppression
13 |
def Predict(img):
    """Detect organs in *img*, keeping only the highest-confidence box per class.

    For each detected class, the best (highest-confidence) box is cropped,
    annotated onto the input image, and saved to disk as "<label>.jpg".

    Parameters
    ----------
    img : ndarray image (assumed RGB as delivered by Gradio — TODO confirm).

    Returns
    -------
    tuple of:
        annotated_img  : the input image with one labelled box per class.
        cropped_images : list of (label, confidence, crop) tuples.
        objects_name   : list of (label, confidence) pairs.
    """
    objects_name = []
    cropped_images = []
    img_name_list = []
    results = model(img, size=640)
    annotator = Annotator(img, line_width=2, example=str('Organ'))

    # Keep only the best detection per class label.
    detections = {}
    for result in results.xyxy[0]:
        xmin, ymin, xmax, ymax, confidence, class_id = result
        label = results.names[int(class_id)]
        confidence = float(confidence)

        if label not in detections or detections[label]['confidence'] < confidence:
            detections[label] = {
                'box': [xmin, ymin, xmax, ymax],
                'confidence': confidence
            }

    for label, data in detections.items():
        xmin, ymin, xmax, ymax = data['box']
        confidence = data['confidence']

        # Cropping the detected object (rows = y range, cols = x range).
        cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
        cropped_images.append((label, confidence, cropped_img))

        # Annotating the image with the box and "<label> <conf>" caption.
        annotator.box_label([xmin, ymin, xmax, ymax], f"{label} {confidence:.2f}", color=(255, 0, 0))

        # cv2.imwrite expects BGR channel order; swap channels before saving.
        # (BGR2RGB and RGB2BGR perform the identical swap, so this is the
        # same operation the previous code did under a misleading name.)
        cropped_img_bgr = cv2.cvtColor(cropped_img, cv2.COLOR_RGB2BGR)

        # Save the cropped image, one file per class label (later detections
        # of the same label would overwrite — only one survives per class).
        crop_filename = f"{label}.jpg"
        img_name_list.append(crop_filename)
        cv2.imwrite(crop_filename, cropped_img_bgr)

    annotated_img = annotator.result()
    objects_name = [(label, data['confidence']) for label, data in detections.items()]
    # Removed dead code: a local `labels` list was built here but never used;
    # the caller (output_display) constructs it from objects_name instead.
    return annotated_img, cropped_images, objects_name
60 |
+
def output_display(img):
    """Adapt Predict()'s output to the three Gradio output components.

    Returns the annotated image, a plain list of crops for the gallery,
    and a JSON-friendly list of {"label", "confidence"} dicts.
    """
    annotated, crops_with_meta, names = Predict(img)

    gallery = []
    for _lbl, _conf, crop_img in crops_with_meta:
        gallery.append(crop_img)

    meta = []
    for lbl, conf in names:
        meta.append({"label": lbl, "confidence": conf})

    return annotated, gallery, meta
69 |
+
# Gradio UI wiring: one image in -> annotated image, crop gallery,
# and label/confidence JSON out.
interface = gr.Interface(
    fn=output_display,
    inputs=["image"],
    outputs=[
        gr.Image(label="Annotated Image"),
        gr.Gallery(label="Cropped Images"),
        gr.JSON(label="Labels and Confidence"),
    ],
)

interface.launch(debug=True)