import gradio as gr

from detector.face import verify_faces, analyze_face
from detector.voice import verify_voices
from detector.video import verify_faces_in_video
from reports.pdf_report import generate_pdf_report
from utils.youtube_utils import download_youtube_video

# Holds the latest results so they can be bundled into the PDF report
last_face_result = None
last_voice_result = None
last_video_results = None


def start_scan(image, audio):
    # Placeholder callback (currently not wired to any UI component)
    return "Scanning in progress...", None


def compare_faces(img1_path, img2_path):
    """Compare two face images and remember the result for the report."""
    global last_face_result
    result = verify_faces(img1_path, img2_path)
    if "error" in result:
        return f"❌ Error: {result['error']}"
    if result["verified"]:
        result_text = f"✅ Match! Distance: {result['distance']:.4f} (Threshold: {result['threshold']})"
    else:
        result_text = f"❌ No Match. Distance: {result['distance']:.4f} (Threshold: {result['threshold']})"
    last_face_result = result_text
    return result_text


def compare_voices(audio1, audio2):
    """Compare two voice samples and remember the result for the report."""
    global last_voice_result
    result = verify_voices(audio1, audio2)
    if "error" in result:
        return f"❌ Error: {result['error']}"
    if result["match"]:
        result_text = f"✅ Same speaker detected. Similarity: {result['similarity']} (Threshold: {result['threshold']})"
    else:
        result_text = f"❌ Different speakers. Similarity: {result['similarity']} (Threshold: {result['threshold']})"
    last_voice_result = result_text
    return result_text


def scan_video(video_file, ref_img, youtube_url=""):
    """Scan an uploaded video (or a YouTube URL) for frames that do not match the reference face."""
    global last_video_results
    if youtube_url:
        try:
            video_file = download_youtube_video(youtube_url)
        except Exception as e:
            return f"❌ Error downloading YouTube video: {str(e)}"

    results = verify_faces_in_video(video_file, ref_img)
    last_video_results = results

    report = ""
    for r in results:
        if "error" in r:
            report += f"\n⚠️ Frame {r['frame']}: {r['error']}"
        else:
            status = "✅ Match" if r["verified"] else "❌ Mismatch"
            report += f"\n🖼 Frame {r['frame']}: {status} (Distance: {r['distance']})"
    return report


def generate_report():
    """Bundle the stored face, voice, and video results into a downloadable PDF."""
    return generate_pdf_report(last_face_result, last_voice_result, last_video_results)


with gr.Blocks(title="Deepfake Watchdog") as demo:
    gr.Markdown("# 🛡️ Deepfake Watchdog 🤗")
    gr.Markdown("## Upload your image and/or voice to scan for deepfake misuse online.")

    # Face Verification
    with gr.Tab("Face Verification"):
        image1 = gr.Image(label="Upload your face", type="filepath")
        image2 = gr.Image(label="Upload another face", type="filepath")
        run_button = gr.Button("Compare Faces")
        output_text = gr.Textbox(label="Result")
        run_button.click(compare_faces, inputs=[image1, image2], outputs=[output_text])

    # Voice Verification
    with gr.Tab("🎤 Voice Verification"):
        gr.Markdown("Upload two audio files to check if the voices match.")
        audio1 = gr.Audio(type="filepath", label="Voice Sample 1")
        audio2 = gr.Audio(type="filepath", label="Voice Sample 2")
        voice_btn = gr.Button("Compare Voices")
        voice_output = gr.Textbox(label="Result")
        voice_btn.click(compare_voices, inputs=[audio1, audio2], outputs=voice_output)

    # Video Deepfake Scan
    with gr.Tab("📹 Video Deepfake Scan"):
        gr.Markdown("🔍 Upload a video or paste a YouTube link and we'll analyze it for deepfake face swaps.")
        ref_img = gr.Image(type="filepath", label="Reference Face")
        video_input = gr.Video(label="Video File (optional)")
        youtube_url = gr.Textbox(label="YouTube URL (optional)")
        scan_btn = gr.Button("Scan Video")
        scan_output = gr.Textbox(label="Scan Results", lines=10)
        scan_btn.click(scan_video, inputs=[video_input, ref_img, youtube_url], outputs=scan_output)

    # PDF Report
    with gr.Tab("📄 Generate Report"):
        report_btn = gr.Button("Generate PDF Report")
        report_output = gr.File(label="Download Report")
        report_btn.click(generate_report, outputs=report_output)

demo.launch(mcp_server=True)