imperiusrex committed
Commit f2f00f4 · verified · 1 Parent(s): 81185a4

Upload 5 files

Files changed (5)
  1. app.py +133 -4
  2. best.pt +3 -0
  3. requirements.txt +9 -0
  4. runtime.txt +1 -0
  5. yolov8n.pt +3 -0
app.py CHANGED
@@ -1,7 +1,136 @@
 import gradio as gr
+import os
+import cv2
+import numpy as np
+import torch
+import spaces
+from ultralytics import YOLO
+from tqdm import tqdm
 
-def greet(name):
-    return "Hello " + name + "!!"
+# Fix for Ultralytics config write error in Hugging Face environment
+os.environ["YOLO_CONFIG_DIR"] = "/tmp"
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
+# Use GPU if available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# Load models onto the appropriate device
+extract_model = YOLO("best.pt").to(device)
+detect_model = YOLO("yolov8n.pt").to(device)
+
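+# On Hugging Face Spaces, @spaces.GPU requests GPU hardware for the duration
+# of each decorated call; outside Spaces the decorator is a no-op.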
+@spaces.GPU
+def process_video(video_path):
+    os.makedirs("frames", exist_ok=True)
+
+    # Step 1: Extract board-only frames
+    cap = cv2.VideoCapture(video_path)
+    frames, idx = [], 0
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        results = extract_model(frame)
+        labels = [extract_model.names[int(c)] for c in results[0].boxes.cls.cpu().numpy()]
+        if "board" in labels and "person" not in labels:
+            frames.append(frame)
+            cv2.imwrite(f"frames/frame_{idx:04d}.jpg", frame)
+        idx += 1
+    cap.release()
+    if not frames:
+        raise RuntimeError("No frames with only 'board' and no 'person' found.")
+
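+    # Per-pixel fusion only works if the board sits on the same pixels in
+    # every frame, so each frame is first registered to the first one.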
+    # Step 2: Align
+    def align_frames(ref, tgt):
+        orb = cv2.ORB_create(500)
+        k1, d1 = orb.detectAndCompute(ref, None)
+        k2, d2 = orb.detectAndCompute(tgt, None)
+        if d1 is None or d2 is None:
+            return None
+        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+        matches = matcher.match(d1, d2)
+        if len(matches) < 10:
+            return None
+        src = np.float32([k2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
+        dst = np.float32([k1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
+        H, _ = cv2.findHomography(src, dst, cv2.RANSAC)
+        return None if H is None else cv2.warpPerspective(tgt, H, (ref.shape[1], ref.shape[0]))
+
+    base = frames[0]
+    aligned = [base]
+    for f in tqdm(frames[1:], desc="Aligning"):
+        a = align_frames(base, f)
+        if a is not None:
+            aligned.append(a)
+    if not aligned:
+        raise RuntimeError("Alignment failed for all frames.")
+
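+    # The per-pixel median keeps the value seen in most frames, so anything
+    # transient (hands, glare, partial occlusions) is voted out.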
+    # Step 3: Median-fuse
+    stack = np.stack(aligned, axis=0).astype(np.float32)
+    median_board = np.median(stack, axis=0).astype(np.uint8)
+    cv2.imwrite("clean_board.jpg", median_board)
+
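+    # Unlike the plain median, Step 4 excludes person pixels explicitly and
+    # averages each pixel only over the frames where it was left unmasked.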
+    # Step 4: Mask persons & selective fuse
+    sum_img = np.zeros_like(aligned[0], dtype=np.float32)
+    count = np.zeros(aligned[0].shape[:2], dtype=np.float32)
+    for f in tqdm(aligned, desc="Masking persons"):
+        res = detect_model(f, verbose=False)
+        m = np.zeros(f.shape[:2], dtype=np.uint8)
+        for box in res[0].boxes:
+            if detect_model.names[int(box.cls)] == "person":
+                x1, y1, x2, y2 = map(int, box.xyxy[0])
+                cv2.rectangle(m, (x1, y1), (x2, y2), 255, -1)
+        inv = cv2.bitwise_not(m)
+        masked = cv2.bitwise_and(f, f, mask=inv)
+        sum_img += masked.astype(np.float32)
+        count += (inv > 0).astype(np.float32)
+
+    count[count == 0] = 1
+    selective = (sum_img / count[:, :, None]).astype(np.uint8)
+    cv2.imwrite("fused_board_selective.jpg", selective)
+
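+    # Unsharp mask: 1.5*img - 0.5*blur leaves brightness unchanged (the
+    # weights sum to 1) while boosting edge detail such as chalk strokes.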
+    # Step 5: Sharpen
+    blur = cv2.GaussianBlur(selective, (5, 5), 0)
+    sharp = cv2.addWeighted(selective, 1.5, blur, -0.5, 0)
+    cv2.imwrite("sharpened_board_color.jpg", sharp)
+
+    return "clean_board.jpg", "fused_board_selective.jpg", "sharpened_board_color.jpg"
+
+
+demo = gr.Interface(
+    fn=process_video,
+    inputs=[
+        gr.File(
+            label="Upload Classroom Video (.mp4)",
+            file_types=['.mp4'],
+            file_count="single",
+            type="filepath"
+        )
+    ],
+    outputs=[
+        gr.Image(label="Median-Fused Clean Board"),
+        gr.Image(label="Selective Fusion (No Persons)"),
+        gr.Image(label="Sharpened Final Board")
+    ],
+    title="📹 Classroom Board Cleaner",
+    description=(
+        "1️⃣ Upload your classroom video (.mp4)\n"
+        "2️⃣ Automatic extraction, alignment, masking, fusion & sharpening\n"
+        "3️⃣ View three stages of the cleaned board output"
+    )
+)
+
+if __name__ == "__main__":
+    if device == "cuda":
+        print(f"[INFO] ✅ Using GPU: {torch.cuda.get_device_name(0)}")
+    else:
+        print("[INFO] ⚠️ Using CPU (GPU not available or not assigned)")
+    demo.launch()
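
The fusion arithmetic in Steps 3 and 4 is easy to sanity-check offline. A minimal sketch, not part of this commit (the toy frames and occluder positions are invented for illustration), that exercises both the median fuse and the masked selective average:

import numpy as np

# Three toy 4x4 "frames": a constant board (value 100) with a transient
# occluder (value 255) at a different pixel in each frame.
board = np.full((4, 4), 100, dtype=np.float32)
frames = [board.copy() for _ in range(3)]
spots = [(0, 0), (1, 1), (2, 2)]
for f, (y, x) in zip(frames, spots):
    f[y, x] = 255

# Step 3 equivalent: the per-pixel median out-votes the minority occluder.
median = np.median(np.stack(frames), axis=0)
assert np.all(median == 100)

# Step 4 equivalent: zero out masked pixels, then divide the per-pixel sum
# by the number of frames in which that pixel survived the mask.
sum_img = np.zeros_like(board)
count = np.zeros_like(board)
for f, (y, x) in zip(frames, spots):
    keep = np.ones_like(f)
    keep[y, x] = 0              # pretend the detector boxed this pixel
    sum_img += f * keep
    count += keep
count[count == 0] = 1           # same divide-by-zero guard as in app.py
selective = sum_img / count
assert np.all(selective == 100)
print("median and selective fusion both recover the clean board")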
best.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6550797c45403b12a25ba9a88bb1f8ef075ef235f884257ee28a4c5b7aa758c0
+size 6249123
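
Step 1 assumes the custom best.pt checkpoint exposes both a "board" and a "person" class. A quick hedged check after pulling the LFS payloads (assuming a standard Ultralytics checkpoint; the exact names depend on how the model was trained):

from ultralytics import YOLO

# Print the class-name mapping stored in the checkpoint; the Step 1 filter
# expects "board" and "person" to appear among these values.
print(YOLO("best.pt").names)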
requirements.txt ADDED
@@ -0,0 +1,9 @@
+gradio
+ultralytics
+opencv-python-headless
+numpy
+Pillow
+tqdm
+scikit-image
+matplotlib
+spaces
runtime.txt ADDED
@@ -0,0 +1 @@
+accelerator: gpu
yolov8n.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f59b3d833e2ff32e194b5bb8e08d211dc7c5bdf144b90d2c8412c47ccfc83b36
+size 6549796
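
Both .pt files are stored as Git LFS pointers, so the repository itself only holds the metadata above; the oid is the SHA-256 of the actual payload. A small sketch (standard library only; file names taken from this commit) to verify that pulled files match their pointers:

import hashlib

def lfs_oid(path, chunk=1 << 20):
    # Stream the file so large checkpoints do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as fh:
        while data := fh.read(chunk):
            h.update(data)
    return h.hexdigest()

for name in ("best.pt", "yolov8n.pt"):
    print(name, lfs_oid(name))  # compare against the pointer's oid sha256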