import gradio as gr
from tools import compare_faces, compare_voices, scan_video, generate_report
from llama_agent import chat_with_agent
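
# Gradio front end for Deepfake Watchdog: a chat tab backed by the LLaMA agent,
# plus dedicated tabs for face verification, voice verification, video deepfake
# scanning, and PDF report generation, all wired to the tool functions above.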
with gr.Blocks(title="Deepfake Watchdog") as demo:
    gr.Markdown("# 🛡️ Deepfake Watchdog 🤗")
    gr.Markdown("## Upload your image and/or voice to scan for deepfake misuse online.")

    # Chat
    with gr.Tab("🤖 Chat with Agent"):
        gr.Markdown(
            "Chat with the AI agent. Ask it to compare faces, voices, or scan videos naturally."
        )
        user_prompt = gr.Textbox(label="Prompt", lines=3)

        gr.Markdown("### Example Prompts:")
        gr.Markdown("##### Compare the two uploaded faces and tell me if they match")
        gr.Markdown("##### Check if the two voices are from the same speaker")
        gr.Markdown(
            "##### Scan a video for a provided reference image to check for deepfakes"
        )

        with gr.Row():
            img1 = gr.Image(type="filepath", label="Image 1")
            img2 = gr.Image(type="filepath", label="Image 2")
        with gr.Row():
            audio1 = gr.Audio(type="filepath", label="Audio 1")
            audio2 = gr.Audio(type="filepath", label="Audio 2")

        ref_img = gr.Image(type="filepath", label="Reference Image")
        video = gr.Video(label="Video file")

        chat_btn = gr.Button("Chat")
        chat_output = gr.Textbox(label="Chat Output", lines=10)

        # Forward the prompt and any uploaded media to the agent, which picks
        # the appropriate tool (face, voice, or video scan) to answer with.
        chat_btn.click(
            chat_with_agent,
            inputs=[user_prompt, img1, img2, audio1, audio2, video, ref_img],
            outputs=[chat_output],
        )

    # Face Verification
    with gr.Tab("Face Verification"):
        image1 = gr.Image(label="Upload your face", type="filepath")
        # audio = gr.Audio(label="Upload your voice (optional)", type="filepath")
        image2 = gr.Image(label="Upload another face", type="filepath")
        # face_btn = gr.Button("Compare Faces")
        run_button = gr.Button("Compare Faces")
        output_text = gr.Textbox(label="Result")
        # output_gallery = gr.Gallery(label="Matched Results")

        run_button.click(compare_faces, inputs=[image1, image2], outputs=[output_text])

    # Voice Verification
    with gr.Tab("🎤 Voice Verification"):
        gr.Markdown("Upload two audio files to check if the voices match.")
        # These components shadow the chat tab's audio1/audio2 names; the chat
        # handler above is already wired to its own components, so this is safe.
        audio1 = gr.Audio(type="filepath", label="Voice Sample 1")
        audio2 = gr.Audio(type="filepath", label="Voice Sample 2")
        voice_btn = gr.Button("Compare Voices")
        voice_output = gr.Textbox(label="Result")

        voice_btn.click(compare_voices, inputs=[audio1, audio2], outputs=voice_output)
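
    # Video Deepfake Scan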
    with gr.Tab("📹 Video Deepfake Scan"):
        gr.Markdown(
            "🔍 Upload a video or paste a YouTube link and we'll analyze it for deepfake face swaps."
        )
        ref_img = gr.Image(type="filepath", label="Reference Face")
        video_input = gr.Video(label="Video File (optional)")
        youtube_url = gr.Textbox(label="YouTube URL (optional)")
        scan_btn = gr.Button("Scan Video")
        scan_output = gr.Textbox(label="Scan Results", lines=10)

        scan_btn.click(
            scan_video, inputs=[video_input, ref_img, youtube_url], outputs=scan_output
        )
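
    # Report Generation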
    with gr.Tab("📄 Generate Report"):
        report_btn = gr.Button("Generate PDF Report")
        report_output = gr.File(label="Download Report")

        report_btn.click(generate_report, outputs=report_output)
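
# Launch the web UI; mcp_server=True also exposes the app's functions over
# Gradio's MCP server (this assumes Gradio is installed with the [mcp] extra).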
demo.launch(mcp_server=True)