import gradio as gr
import cv2
import numpy as np
from PIL import Image
from ultralytics import YOLO  # YOLOv8 from Ultralytics

# Load YOLOv8 model (pre-trained on the COCO dataset)
model = YOLO("yolov8n.pt")  # Using the "nano" model (fast & lightweight)
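# Note: if "yolov8n.pt" is not already present locally, Ultralytics downloads the
# pretrained weights on first use; larger variants (e.g. "yolov8s.pt", "yolov8m.pt")
# can be swapped in for better accuracy at the cost of speed.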
# Apply smoothing using OpenCV's medianBlur
def smooth_image(image):
    image = np.array(image)               # Convert PIL image to NumPy array
    smoothed = cv2.medianBlur(image, 15)  # Apply median blur with kernel size 15
    return Image.fromarray(smoothed)      # Convert back to PIL image
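# Note: cv2.medianBlur requires an odd kernel size, and for sizes larger than 5 the
# input must be an 8-bit image (which uploaded photos normally are); a smaller kernel
# such as 5 would preserve more detail if the blur above is too strong.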
# Apply erosion (morphological transformation)
def erode_image(image):
    image = np.array(image)
    kernel = np.ones((3, 3), np.uint8)               # Define a 3x3 kernel
    eroded = cv2.erode(image, kernel, iterations=1)  # Apply erosion
    return Image.fromarray(eroded)                   # Convert back to PIL image
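# Note: erosion shrinks bright regions and removes small white specks; the effect can
# be strengthened by increasing `iterations` or the kernel size, or reversed with
# cv2.dilate using the same kernel.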
# Apply image segmentation using Otsu's thresholding
def segment_image(image):
    image = np.array(image)
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # Convert to grayscale
    _, segmented = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # Apply Otsu's thresholding
    return Image.fromarray(segmented)  # Convert back to PIL image
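# Note: with THRESH_OTSU the threshold value passed in (0 here) is ignored; OpenCV
# picks the threshold automatically from the grayscale histogram and returns it as the
# first value of cv2.threshold (discarded as `_` above).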
# Apply the Fourier Transform and display the magnitude spectrum
def fourier_transform(image):
    image = np.array(image)
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # Convert to grayscale
    dft = np.fft.fft2(gray)                         # Compute 2D Fourier Transform
    dft_shift = np.fft.fftshift(dft)                # Shift zero frequency to center
    magnitude_spectrum = 20 * np.log(np.abs(dft_shift) + 1)  # Compute magnitude spectrum
    magnitude_spectrum = np.uint8(255 * (magnitude_spectrum / np.max(magnitude_spectrum)))  # Normalize to 0-255 for display
    return Image.fromarray(magnitude_spectrum)
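# Note: the "+ 1" inside np.log avoids log(0) at zero-magnitude frequencies, and
# fftshift moves the DC (zero-frequency) component to the center so low frequencies
# appear in the middle of the displayed spectrum.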
def detect_objects(image):
    image = np.array(image)  # Convert PIL image to NumPy array

    # Perform object detection
    results = model(image)

    # Process detections
    for result in results:
        boxes = result.boxes.xyxy                    # Bounding boxes (x1, y1, x2, y2)
        confidences = result.boxes.conf              # Confidence scores
        class_ids = result.boxes.cls.int().tolist()  # Class IDs

        for box, conf, class_id in zip(boxes, confidences, class_ids):
            x1, y1, x2, y2 = map(int, box.tolist())
            label = f"{model.names[class_id]} ({conf:.2f})"

            # Draw bounding box & label
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    return Image.fromarray(image)  # Convert back to PIL image for Gradio
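# Note: drawing happens in-place on the NumPy copy of the uploaded image;
# `model.names` maps COCO class IDs (0-79) to labels such as "person" or "car".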
def create_interface():
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(label="Upload Image", type="pil")
            with gr.Column():
                output_image = gr.Image(label="Processed Image", type="pil")

        with gr.Row():
            smoothing_button = gr.Button("Smoothing / Blurring")
            morphological_transform_button = gr.Button("Morphological Transformations")
            fourier_transform_button = gr.Button("Fourier Transform")
            segmentation_button = gr.Button("Segmentation")
            object_recognition_button = gr.Button("Object Recognition (YOLO)")

        # Link buttons to their respective functions
        smoothing_button.click(smooth_image, inputs=image_input, outputs=output_image)
        morphological_transform_button.click(erode_image, inputs=image_input, outputs=output_image)
        fourier_transform_button.click(fourier_transform, inputs=image_input, outputs=output_image)
        segmentation_button.click(segment_image, inputs=image_input, outputs=output_image)
        object_recognition_button.click(detect_objects, inputs=image_input, outputs=output_image)

    return demo

# Launch the Gradio app
app = create_interface()
app.launch()
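# Note: on Hugging Face Spaces, launch() with no arguments is enough; when running
# locally, app.launch(share=True) would also create a temporary public link.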