Abraham E. Tavarez committed on
Commit 342ea4d · 1 Parent(s): 76b7c37

video detector

Files changed (2)
  1. app.py +82 -9
  2. detector/video.py +57 -0
app.py CHANGED
@@ -1,22 +1,95 @@
 import gradio as gr
+from detector.face import verify_faces, analyze_face
+from detector.voice import verify_voices
+from detector.video import verify_faces_in_video
+
 
 def start_scan(image, audio):
     return "Scanning in progress...", None
 
-with gr.Blocks(title="Deepfake Watchdog") as app:
-    gr.Markdown("## 🛡️ Deepfake Watchdog\nUpload your image and/or voice to scan for deepfake misuse online.")
-
-    with gr.Row():
+
+def compare_faces(img1_path, img2_path):
+    result = verify_faces(img1_path, img2_path)
+
+    if "error" in result:
+        return f"❌ Error: {result['error']}"
+
+    if result["verified"]:
+        return f"✅ Match! Distance: {result['distance']:.4f} (Threshold: {result['threshold']})"
+    else:
+        return f"❌ No Match. Distance: {result['distance']:.4f} (Threshold: {result['threshold']})"
+
+
+def compare_voices(audio1, audio2):
+    result = verify_voices(audio1, audio2)
+
+    if "error" in result:
+        return f"❌ Error: {result['error']}"
+
+    if result["match"]:
+        return f"✅ Same speaker detected. Similarity: {result['similarity']} (Threshold: {result['threshold']})"
+    else:
+        return f"❌ Different speakers. Similarity: {result['similarity']} (Threshold: {result['threshold']})"
+
+
+def scan_video(video_path, ref_img):
+    results = verify_faces_in_video(video_path, ref_img)
+    report = ""
+
+    for r in results:
+        if "error" in r:
+            report += f"\n⚠️ Frame {r['frame']}: {r['error']}"
+        else:
+            status = "✅ Match" if r["verified"] else "❌ Mismatch"
+            report += f"\n🖼 Frame {r['frame']}: {status} (Distance: {r['distance']})"
+
+    return report
+
+
+
+with gr.Blocks(title="Deepfake Watchdog") as demo:
+    gr.Markdown("# 🛡️Deepfake Watchdog 🤗")
+    gr.Markdown("## Upload your image and/or voice to scan for deepfake misuse online.")
+
+    # Face Verification
+    gr.Markdown("### 📷 Face Verification")
+    with gr.Tab("Face Verification"):
         image = gr.Image(label="Upload your face", type="filepath")
         audio = gr.Audio(label="Upload your voice (optional)", type="filepath")
-
+
         run_button = gr.Button("Start Scan")
         output_text = gr.Textbox(label="Status")
         output_gallery = gr.Gallery(label="Matched Results")
-
-    run_button.click(start_scan, inputs=[image, audio], outputs=[output_text, output_gallery])
-
+
+        run_button.click(
+            start_scan, inputs=[image, audio], outputs=[output_text, output_gallery]
+        )
+
+    # Voice Verification
+    gr.Markdown("### 🎤 Voice Verification")
+    with gr.Tab("🎤 Voice Verification"):
+        gr.Markdown("Upload two audio files to check if the voices match.")
+
+        audio1 = gr.Audio(type="filepath", label="Voice Sample 1")
+        audio2 = gr.Audio(type="filepath", label="Voice Sample 2")
+
+        voice_btn = gr.Button("Compare Voices")
+        voice_output = gr.Textbox(label="Result")
+
+        voice_btn.click(compare_voices, inputs=[audio1, audio2], outputs=voice_output)
+
+    # Video Deepfake Scan
+    gr.Markdown("### 📹 Video Deepfake Scan")
+    with gr.Tab("📹 Video Deepfake Scan"):
+        gr.Markdown("Upload a video and a reference image. We'll scan for deepfake face mismatches.")
+
+        ref_img = gr.Image(type="filepath", label="Reference Face")
+        video_input = gr.Video(label="Video File")
+        scan_btn = gr.Button("Scan Video")
+        scan_output = gr.Textbox(label="Scan Results", lines=10)
+
+        scan_btn.click(scan_video, inputs=[video_input, ref_img], outputs=scan_output)
 
 
 
-app.launch(mcp_server=True)
+demo.launch(mcp_server=True)
detector/video.py ADDED
@@ -0,0 +1,57 @@
+from deepface import DeepFace
+import cv2
+import os
+
+
+def extract_frames(video_path, interval=30):
+    """
+    Extracts frames from a video at a specified interval.
+    """
+    # Open the video file
+    cap = cv2.VideoCapture(video_path)
+    frames = []
+    count = 0
+
+    # Loop through the video frames
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Save every `interval`-th frame to a temporary image file
+        if count % interval == 0:
+            frame_path = f"/tmp/frame_{count}.jpg"
+            cv2.imwrite(frame_path, frame)
+            frames.append(frame_path)
+        count += 1
+
+    # Release the video capture object
+    cap.release()
+
+    return frames
+
+
+def verify_faces_in_video(video_path, reference_img, interval=30, threshold=0.7):
+    """
+    Verifies if faces in a video match a reference image.
+    """
+    results = []
+    # Extract frames from the video
+    frames = extract_frames(video_path, interval)
+
+    # Loop through the frames
+    for frame_path in frames:
+        # Verification can fail (e.g. no face found), so record errors per frame
+        try:
+            # Perform face verification
+            result = DeepFace.verify(
+                img1_path=reference_img, img2_path=frame_path, enforce_detection=False
+            )
+            score = result["distance"]
+            verified = result["verified"]
+            results.append(
+                {"frame": frame_path, "distance": round(score, 4), "verified": verified}
+            )
+        except Exception as e:
+            results.append({"frame": frame_path, "error": str(e)})
+    return results