Abraham E. Tavarez committed
Commit · a448f8b
1 Parent(s): bebc6f9

video scanning offloaded to Modal

Files changed:
- app.py (+18, -6)
- modal_app/modal_app.py (+78, -0)
app.py CHANGED

@@ -4,9 +4,12 @@ from detector.video import verify_faces_in_video
 from reports.pdf_report import generate_pdf_report
 from utils.youtube_utils import download_youtube_video
 import modal
+
 verify_faces_remote = modal.Function.lookup("deepface-agent", "verify_faces_remote")
 verify_voices_remote = modal.Function.lookup("deepface-agent", "verify_voices_remote")
-
+verify_faces_in_video_remote = modal.Function.lookup(
+    "deepface-agent", "verify_faces_in_video_remote"
+)
 
 
 # Holds latest results
@@ -14,6 +17,7 @@ last_face_result = None
 last_voice_result = None
 last_video_results = None
 
+
 def compare_faces(img1_path: str, img2_path: str) -> str:
     """Use this tool to compare to faces for a match
     Args:
@@ -21,12 +25,12 @@ def compare_faces(img1_path: str, img2_path: str) -> str:
         img2_path: The path to the second image
     """
     global last_face_result
-
+
     # Read image files as bytes
     with open(img1_path, "rb") as f1, open(img2_path, "rb") as f2:
         img1_bytes = f1.read()
         img2_bytes = f2.read()
-
+
     result = verify_faces_remote.remote(img1_bytes, img2_bytes)
     result_text = ""
 
@@ -51,12 +55,12 @@ def compare_voices(audio1: str, audio2: str) -> str:
         audio2: The path to the second audio file
     """
     global last_voice_result
-
+
     try:
         with open(audio1, "rb") as a1, open(audio2, "rb") as a2:
             audio1_bytes = a1.read()
             audio2_bytes = a2.read()
-
+
         result = verify_voices_remote.remote(audio1_bytes, audio2_bytes)
         result_text = ""
 
@@ -90,7 +94,15 @@ def scan_video(video_file: str, ref_img: str, youtube_url="") -> str:
     except Exception as e:
         return f"❌ Error downloading YouTube video: {str(e)}"
 
-
+    with open(video_file, "rb") as vf, open(ref_img, "rb") as rf:
+        video_bytes = vf.read()
+        ref_img_bytes = rf.read()
+    try:
+        results = verify_faces_in_video_remote.remote(video_bytes, ref_img_bytes, interval=30)
+
+    except Exception as e:
+        return f"❌ Error processing video: {str(e)}"
+
     report = ""
     last_video_results = results
     for r in results:
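The pattern app.py leans on here: look up functions deployed under the "deepface-agent" Modal app with modal.Function.lookup, then call them with .remote() so the heavy DeepFace work runs on Modal instead of inside the Space. A minimal sketch of driving the new remote video scan from a plain local script could look like the following; the run_scan helper and the file paths are illustrative, not part of the commit.

# sketch: calling the deployed Modal function from any local Python process
# assumes `modal deploy modal_app/modal_app.py` has already published "deepface-agent"
import modal

verify_faces_in_video_remote = modal.Function.lookup(
    "deepface-agent", "verify_faces_in_video_remote"
)

def run_scan(video_path: str, ref_img_path: str, interval: int = 30) -> None:
    # send raw bytes; the remote side writes them to temp files itself
    with open(video_path, "rb") as vf, open(ref_img_path, "rb") as rf:
        results = verify_faces_in_video_remote.remote(vf.read(), rf.read(), interval=interval)

    for r in results:
        if "error" in r:
            print(f"frame {r.get('frame', '?')}: error {r['error']}")
        else:
            print(f"frame {r['frame']}: verified={r['verified']} distance={r['distance']}")

if __name__ == "__main__":
    run_scan("sample.mp4", "reference.jpg")  # illustrative paths

The return value mirrors what the remote function builds below: a list of per-frame dicts carrying either distance/verified or an error message.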
modal_app/modal_app.py CHANGED

@@ -77,3 +77,81 @@ def verify_voices_remote(audio1_bytes, audio2_bytes):
     finally:
         pathlib.Path(audio1_path).unlink()
         pathlib.Path(audio2_path).unlink()
+
+
+@app.function(image=image, gpu="any", timeout=600)
+def verify_faces_in_video_remote(video_bytes: bytes, ref_img_bytes: bytes, interval: int = 30):
+    import cv2
+    import tempfile
+    import os
+    import numpy as np
+    from PIL import Image
+    from deepface import DeepFace
+    from io import BytesIO
+
+    results = []
+    frame_paths = []
+
+    with (
+        tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as video_temp,
+        tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as ref_img_temp,
+    ):
+        video_temp.write(video_bytes)
+        ref_img_temp.write(ref_img_bytes)
+        video_path = video_temp.name
+        ref_img_path = ref_img_temp.name
+
+    try:
+        # --- FRAME EXTRACTION ---
+        cap = cv2.VideoCapture(video_path)
+        frame_count = 0
+        temp_dir = tempfile.gettempdir()
+        frame_paths = []
+
+        while cap.isOpened():
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            if frame_count % interval == 0:
+                frame_path = os.path.join(temp_dir, f"frame_{frame_count}.jpg")
+                success = cv2.imwrite(frame_path, frame)
+                if success:
+                    frame_paths.append((frame_count, frame_path))
+
+            frame_count += 1
+
+        cap.release()
+
+        # --- FACE VERIFICATION ---
+        for frame_id, frame_path in frame_paths:
+            try:
+                result = DeepFace.verify(
+                    img1_path=ref_img_path,
+                    img2_path=frame_path,
+                    enforce_detection=False,
+                )
+                results.append({
+                    "frame": frame_id,
+                    "distance": round(result["distance"], 4),
+                    "verified": result["verified"]
+                })
+            except Exception as e:
+                results.append({
+                    "frame": frame_id,
+                    "error": str(e)
+                })
+
+        return results
+
+    except Exception as e:
+        return [{"error": str(e)}]
+
+    finally:
+        os.remove(video_path)
+        os.remove(ref_img_path)
+        for _, frame_path in frame_paths:
+            try:
+                os.remove(frame_path)
+            except Exception:
+                pass
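verify_faces_in_video_remote samples every interval-th frame (every 30th here, roughly one frame per second for 30 fps footage), writes each sample to a temp file, and runs DeepFace.verify against the reference image, returning a list of per-frame records. For a quick smoke test outside the Gradio app, one option is a local entrypoint in the same module; this is a sketch, not part of the commit, and it assumes the app object already defined at the top of modal_app.py.

# sketch only: smoke-test entrypoint for use with the `modal run` CLI,
# reusing the existing `app` object from modal_app.py (an assumption)
@app.local_entrypoint()
def scan_demo(video_path: str = "sample.mp4", ref_img_path: str = "reference.jpg"):
    # illustrative default paths; pass real files on the command line
    with open(video_path, "rb") as vf, open(ref_img_path, "rb") as rf:
        results = verify_faces_in_video_remote.remote(vf.read(), rf.read(), interval=30)
    for record in results:
        print(record)

Something like "modal run modal_app/modal_app.py --video-path clip.mp4 --ref-img-path face.jpg" should then execute the scan in Modal and print each per-frame record locally.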