imperiusrex committed on
Commit 8d27bbd · verified · 1 Parent(s): cf55f73

Update app.py

Files changed (1)
  1. app.py +25 -35
app.py CHANGED
@@ -5,22 +5,19 @@ import numpy as np
 from ultralytics import YOLO
 from tqdm import tqdm
 
-# Pre-load models from the repo folder
+# Preload models from your repo root
 extract_model = YOLO("best.pt")
 detect_model = YOLO("yolov8n.pt")
 
 def process_video(video_path):
-    # Prepare output folder
     os.makedirs("frames", exist_ok=True)
 
-    # --- Step 1: Extract clean frames ---
+    # Step 1: Extract board-only frames
    cap = cv2.VideoCapture(video_path)
-    frames = []
-    idx = 0
+    frames, idx = [], 0
     while cap.isOpened():
         ret, frame = cap.read()
-        if not ret:
-            break
+        if not ret: break
         results = extract_model(frame)
         labels = [extract_model.names[int(c)] for c in results[0].boxes.cls.cpu().numpy()]
         if "board" in labels and "person" not in labels:
@@ -29,42 +26,37 @@ def process_video(video_path):
             idx += 1
     cap.release()
     if not frames:
-        raise RuntimeError("No frames found with only 'board' and no 'person'.")
+        raise RuntimeError("No frames with only 'board' and no 'person' found.")
 
-    # --- Step 2: Align frames ---
+    # Step 2: Align
     def align_frames(ref, tgt):
         orb = cv2.ORB_create(500)
         k1, d1 = orb.detectAndCompute(ref, None)
         k2, d2 = orb.detectAndCompute(tgt, None)
-        if d1 is None or d2 is None:
-            return None
+        if d1 is None or d2 is None: return None
         matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
         matches = matcher.match(d1, d2)
-        if len(matches) < 10:
-            return None
+        if len(matches) < 10: return None
         src = np.float32([k2[m.trainIdx].pt for m in matches]).reshape(-1,1,2)
         dst = np.float32([k1[m.queryIdx].pt for m in matches]).reshape(-1,1,2)
         H, _ = cv2.findHomography(src, dst, cv2.RANSAC)
-        if H is None:
-            return None
-        return cv2.warpPerspective(tgt, H, (ref.shape[1], ref.shape[0]))
+        return None if H is None else cv2.warpPerspective(tgt, H, (ref.shape[1], ref.shape[0]))
 
-    base = frames[0]
-    aligned = [base]
+    base = frames[0]; aligned = [base]
     for f in tqdm(frames[1:], desc="Aligning"):
         a = align_frames(base, f)
-        if a is not None:
-            aligned.append(a)
+        if a is not None: aligned.append(a)
     if not aligned:
-        raise RuntimeError("No frames aligned successfully.")
+        raise RuntimeError("Alignment failed for all frames.")
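Review note on Step 2: matcher.match(d1, d2) makes queryIdx index the reference keypoints k1 and trainIdx the target keypoints k2, so findHomography(src, dst) maps target coordinates into the reference frame; that is why warpPerspective uses the reference's width and height. Also, since aligned is seeded with base, the "if not aligned" guard can never fire. A hedged usage sketch, assuming align_frames were lifted to module scope (the file names are hypothetical):

    import cv2
    ref = cv2.imread("frames/ref.jpg")
    tgt = cv2.imread("frames/next.jpg")
    warped = align_frames(ref, tgt)         # None if <10 ORB matches or no homography
    if warped is not None:
        cv2.imwrite("aligned.jpg", warped)  # now pixel-aligned with ref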
 
-    # --- Step 3: Median fuse all aligned frames ---
+    # Step 3: Median-fuse
     stack = np.stack(aligned, axis=0).astype(np.float32)
     median_board = np.median(stack, axis=0).astype(np.uint8)
     cv2.imwrite("clean_board.jpg", median_board)
 
-    # --- Step 4: Build person masks and selective fuse ---
-    masks = []
+    # Step 4: Mask persons & selective fuse
+    masks, sum_img = [], np.zeros_like(aligned[0], dtype=np.float32)
+    count = np.zeros(aligned[0].shape[:2], dtype=np.float32)
     for f in tqdm(aligned, desc="Masking persons"):
         res = detect_model(f, verbose=False)
         m = np.zeros(f.shape[:2], dtype=np.uint8)
@@ -72,43 +64,41 @@ def process_video(video_path):
             if detect_model.names[int(box.cls)] == "person":
                 x1,y1,x2,y2 = map(int, box.xyxy[0])
                 cv2.rectangle(m, (x1,y1), (x2,y2), 255, -1)
-        masks.append(m)
-
-    sum_img = np.zeros_like(aligned[0], dtype=np.float32)
-    count = np.zeros(aligned[0].shape[:2], dtype=np.float32)
-    for f, m in zip(aligned, masks):
-        inv = cv2.bitwise_not(m)
+        inv = cv2.bitwise_not(m)
         masked = cv2.bitwise_and(f, f, mask=inv)
         sum_img += masked.astype(np.float32)
-        count += (inv>0).astype(np.float32)
+        count += (inv>0).astype(np.float32)
 
     count[count==0] = 1
     selective = (sum_img / count[:,:,None]).astype(np.uint8)
     cv2.imwrite("fused_board_selective.jpg", selective)
 
-    # --- Step 5: Sharpen final result ---
+    # Step 5: Sharpen
     blur = cv2.GaussianBlur(selective, (5,5), 0)
     sharp = cv2.addWeighted(selective, 1.5, blur, -0.5, 0)
     cv2.imwrite("sharpened_board_color.jpg", sharp)
 
     return "clean_board.jpg", "fused_board_selective.jpg", "sharpened_board_color.jpg"
 
+
 demo = gr.Interface(
     fn=process_video,
     inputs=[
-        gr.Video(
+        gr.File(
             label="Upload Classroom Video (.mp4)",
+            file_types=['.mp4'],
+            file_count="single",
             type="filepath"
         )
     ],
     outputs=[
         gr.Image(label="Median-Fused Clean Board"),
-        gr.Image(label="Selective Fusion (No Persons)"),
+        gr.Image(label="Selective Fusion (No Persons)"),
         gr.Image(label="Sharpened Final Board")
     ],
     title="📹 Classroom Board Cleaner",
     description=(
-        "1️⃣ Upload your classroom video (no model upload needed)\n"
+        "1️⃣ Upload your classroom video (.mp4)\n"
         "2️⃣ Automatic extraction, alignment, masking, fusion & sharpening\n"
         "3️⃣ View three stages of the cleaned board output"
     )
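Review note on the interface change: gr.File(type="filepath") hands process_video a plain path string, which is exactly what cv2.VideoCapture expects, and file_types=['.mp4'] restricts the picker to MP4 uploads. A hypothetical smoke test that bypasses the UI entirely (the sample path is ours):

    clean, fused, sharp = process_video("sample_classroom.mp4")
    print(clean, fused, sharp)  # paths of the three output images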
 