johnlockejrr committed
Commit 139a78d · verified · 1 parent: 0eacbf2

Update app.py

Files changed (1):
  1. app.py +17 -29
app.py CHANGED
@@ -2,17 +2,12 @@ from typing import Tuple, Dict
 import gradio as gr
 import supervision as sv
 import numpy as np
-from PIL import Image
 from huggingface_hub import hf_hub_download
 from ultralytics import YOLO
 
 # Define models
 MODEL_OPTIONS = {
-    #"YOLOv11-Nano": "medieval-yolov11n.pt",
     "YOLOv11-Small": "medieval-yolo11s-seg.pt"
-    #"YOLOv11-Medium": "medieval-yolov11m.pt",
-    #"YOLOv11-Large": "medieval-yolov11l.pt",
-    #"YOLOv11-XLarge": "medieval-yolov11x.pt"
 }
 
 # Dictionary to store loaded models
@@ -55,8 +50,17 @@ def detect_and_annotate(
     masks = None
     if results.masks is not None:
         masks = results.masks.data.cpu().numpy()
-        # Convert masks to boolean type
-        masks = masks.astype(bool)
+        # Resize masks to match original image dimensions
+        h, w = image.shape[:2]
+        masks = [
+            cv2.resize(mask.astype(float), (w, h),
+                interpolation=cv2.INTER_LINEAR
+            ).astype(bool)
+            for mask in masks
+        ]
+        masks = np.array(masks)
+        # Transpose to (H, W, num_masks)
+        masks = np.transpose(masks, (1, 2, 0))
 
     # Create Detections object
     detections = sv.Detections(
@@ -73,7 +77,7 @@ def detect_and_annotate(
         in zip(class_ids, confidence)
     ]
 
-    # Annotate image with masks and labels
+    # Annotate image
     annotated_image = image.copy()
     if masks is not None:
         annotated_image = MASK_ANNOTATOR.annotate(scene=annotated_image, detections=detections)
@@ -83,14 +87,11 @@ with gr.Blocks() as demo:
 
 # Create Gradio interface
 with gr.Blocks() as demo:
-    gr.Markdown("# Medieval Manuscript Detection with YOLO")
+    gr.Markdown("# Medieval Manuscript Segmentation with YOLO")
 
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(
-                label="Input Image",
-                type='numpy'
-            )
+            input_image = gr.Image(label="Input Image", type='numpy')
             with gr.Accordion("Detection Settings", open=True):
                 model_selector = gr.Dropdown(
                     choices=list(MODEL_OPTIONS.keys()),
@@ -119,17 +120,9 @@ with gr.Blocks() as demo:
            detect_btn = gr.Button("Detect", variant="primary")
 
        with gr.Column():
-            output_image = gr.Image(
-                label="Detection Result",
-                type='numpy'
-            )
+            output_image = gr.Image(label="Segmentation Result", type='numpy')
 
-    def process_image(
-        image: np.ndarray,
-        model_name: str,
-        conf_threshold: float,
-        iou_threshold: float
-    ) -> Tuple[np.ndarray, np.ndarray]:
+    def process_image(image, model_name, conf_threshold, iou_threshold):
        if image is None:
            return None, None
        annotated_image = detect_and_annotate(image, model_name, conf_threshold, iou_threshold)
@@ -138,17 +131,12 @@ with gr.Blocks() as demo:
    def clear():
        return None, None
 
-    # Connect buttons to functions
    detect_btn.click(
        process_image,
        inputs=[input_image, model_selector, conf_threshold, iou_threshold],
        outputs=[input_image, output_image]
    )
-    clear_btn.click(
-        clear,
-        inputs=None,
-        outputs=[input_image, output_image]
-    )
+    clear_btn.click(clear, inputs=None, outputs=[input_image, output_image])
 
 if __name__ == "__main__":
     demo.launch(debug=True, show_error=True)
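Note on the mask-handling hunk: the new code calls cv2.resize, but none of the hunks shown here add an import cv2, so the module is presumably imported elsewhere in app.py. Below is a minimal, self-contained sketch of that resizing step; the helper name resize_masks is hypothetical, and it keeps the masks in the (num_masks, H, W) boolean layout that supervision's sv.Detections expects in the versions I have used (the commit itself transposes to (H, W, num_masks) afterwards).

import cv2
import numpy as np

def resize_masks(masks, image_hw):
    """Resize YOLO masks from (num_masks, h, w) up to boolean (num_masks, H, W)."""
    H, W = image_hw
    resized = [
        # cv2.resize takes the target size as (width, height)
        cv2.resize(mask.astype(float), (W, H), interpolation=cv2.INTER_LINEAR).astype(bool)
        for mask in masks
    ]
    return np.array(resized)

# Possible usage inside detect_and_annotate:
#     masks = resize_masks(results.masks.data.cpu().numpy(), image.shape[:2])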
 
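For context, here is a sketch of how the pieces visible in the first hunk (the MODEL_OPTIONS mapping, the "dictionary to store loaded models", hf_hub_download, and YOLO) presumably fit together. The helper name load_model is illustrative, and the repo_id is a placeholder, since the Hub repository hosting medieval-yolo11s-seg.pt is not shown in this diff.

from huggingface_hub import hf_hub_download
from ultralytics import YOLO

MODEL_OPTIONS = {
    "YOLOv11-Small": "medieval-yolo11s-seg.pt"
}

models = {}  # cache of loaded models, keyed by display name

def load_model(model_name):
    # Download the checkpoint on first use, then reuse the cached YOLO instance.
    if model_name not in models:
        weights = hf_hub_download(
            repo_id="<owner>/<model-repo>",  # placeholder; the real repo is not shown here
            filename=MODEL_OPTIONS[model_name],
        )
        models[model_name] = YOLO(weights)
    return models[model_name]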