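"""Gradio app for solar panel fault detection with a YOLOv8 model.

Two interfaces are provided: single-image detection and drone-video
detection, each returning the input annotated with predicted fault boxes.
"""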
import cv2
import gradio as gr
import uuid
import os
import torch
from ultralytics import YOLO
# Load the trained YOLO weights (expects best.pt alongside this script)
model_path = "best.pt"
model = YOLO(model_path)
def detect(image):
    results = model(image)
    annotated_image = results[0].plot()  # draw boxes (BGR array)
    # Convert BGR to RGB so colors display correctly in the Gradio Image output
    return cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
def detect_faults(video_path):
    # Generate a unique run directory for this request
    unique_id = str(uuid.uuid4())[:8]
    output_dir = os.path.join("runs", "detect", unique_id)
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "output.mp4")

    # Run YOLO prediction (also saves YOLO's own annotated output under runs/detect/<unique_id>)
    model.predict(
        source=video_path,
        save=True,
        save_txt=False,
        conf=0.5,
        project="runs/detect",
        name=unique_id
    )

    # Open the input video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Could not open video."
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    # Annotate each frame and write it to the output video
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        results = model(frame)
        annotated_frame = results[0].plot()
        out.write(annotated_frame)
    cap.release()
    out.release()
    return output_path
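
# Standalone image helper with basic error handling (not wired into the Gradio UI below)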
def detect_faults_in_image(image):
    try:
        results = model(image)
        annotated = results[0].plot()
        return annotated
    except Exception as e:
        print(f"❌ Error processing image: {e}")
        return "Image processing error."
# Gradio video interface
video_ui = gr.Interface(
    fn=detect_faults,
    inputs=gr.Video(label="Upload Drone Video (MP4 only)"),
    outputs=gr.Video(label="Detected Faults Video"),
    title="Solar Panel Fault Detection (Video)",
    description="Upload a drone video of solar panels. The model detects faults and returns an annotated video."
)
# Gradio image interface
image_ui = gr.Interface(
    fn=detect,
    inputs=gr.Image(type="filepath", label="Upload an Image"),
    outputs=gr.Image(type="numpy", label="Detected Output"),
    title="Solar Panel Fault Detector - YOLOv8",
    description="Upload a drone image of solar panels. YOLOv8 will detect any faults."
)

# Combine both interfaces into a single tabbed app
app = gr.TabbedInterface([image_ui, video_ui], ["Image Detection", "Video Detection"])

if __name__ == "__main__":
    app.launch()