Update app.py
app.py
CHANGED
@@ -1,112 +1,89 @@
 import gradio as gr
-from transformers import pipeline,
+from transformers import pipeline, AutoImageProcessor, SiglipForImageClassification
+import cv2, tempfile, os
 from PIL import Image
-import torch
-import cv2
-import os
-import tempfile

 # Load models
-news_model = pipeline("text-classification", model="
+img_model = SiglipForImageClassification.from_pretrained("prithivMLmods/open-deepfake-detection")
+img_proc = AutoImageProcessor.from_pretrained("prithivMLmods/open-deepfake-detection")
+news_model = pipeline("text-classification", model="jy46604790/Fake-News-Bert-Detect")

-# AI Image Detection
-def analyze_image(image):
-    inputs = clip_processor(text=["a real photo", "an AI-generated image"], images=image, return_tensors="pt", padding=True)
-    outputs = clip_model(**inputs)
-    logits_per_image = outputs.logits_per_image
-    probs = logits_per_image.softmax(dim=1).tolist()[0]
-    prediction = "AI-generated" if probs[1] > probs[0] else "Real"
-    confidence = round(max(probs) * 100, 2)
+# Image analysis
+def analyze_image(image):
+    inputs = img_proc(images=image, return_tensors="pt")
+    outputs = img_model(**inputs)
+    probs = outputs.logits.softmax(dim=1).tolist()[0]
+    pred = "Fake (AI-generated)" if probs[0] > probs[1] else "Real"
+    conf = round(max(probs) * 100, 2)
+    expl = (
+        "⚠️ Likely AI-generated due to visual artifacts or unnatural features."
+        if pred.startswith("Fake")
+        else "✅ Appears authentic with realistic textures and lighting."
     )
-    return f"
+    return f"{pred} ({conf}%)", expl

+# News analysis
 def analyze_news(text):
-    label =
-    score = round(
-    else "✅ The article seems factual and based on credible information."
+    res = news_model(text)[0]
+    label = res['label']
+    score = round(res['score'] * 100, 2)
+    pred = "Fake News" if label == "LABEL_0" else "Real News"
+    expl = (
+        "⚠️ This article likely contains misinformation or misleading claims."
+        if pred == "Fake News"
+        else "✅ The text appears credible and factually consistent."
     )
+    return f"{pred} ({score}%)", expl

-        if frame_count % frame_rate == 0:
-            pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-            result, _ = analyze_image(pil_image)
-            results.append(result)
-        frame_count += 1
+# Video analysis (≤10s)
+def analyze_video(file):
+    tmp = tempfile.mkdtemp()
+    path = os.path.join(tmp, "vid.mp4")
+    with open(path, "wb") as f:
+        f.write(file.read())
+
+    cap = cv2.VideoCapture(path)
+    fps = cap.get(cv2.CAP_PROP_FPS) or 1
+    frame_idx, ai_detected = 0, False
+
+    while True:
+        ret, frame = cap.read()
+        if not ret or frame_idx / fps > 10:
+            break
+        if frame_idx % int(fps) == 0:
+            img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+            pred, _ = analyze_image(img)
+            if "Fake" in pred:
+                ai_detected = True
+                break
+        frame_idx += 1
     cap.release()

-    "No strong signs of AI generation were detected in any frame. "
-    "Visuals appear natural and consistent with real-world content."
-    )
+    pred = "Video likely contains AI-generated content" if ai_detected else "Video appears authentic"
+    expl = (
+        "⚠️ At least one frame shows AI generation traits."
+        if ai_detected
+        else "✅ No strong signs of AI generation across frames."
+    )
+    return pred, expl

-# Gradio App
 with gr.Blocks() as demo:
-    gr.Markdown("##
+    gr.Markdown("## 🤖 Real-Time AI & Fake News Validator")
     with gr.Row():
         with gr.Column():
+            img = gr.Image(type="pil")
+            btn_img = gr.Button("Analyze Image")
+            out_img, expl_img = gr.Textbox(), gr.Textbox()
+            btn_img.click(analyze_image, inputs=img, outputs=[out_img, expl_img])

         with gr.Column():
+            txt = gr.Textbox(lines=4, placeholder="Paste news or claim text")
+            btn_txt = gr.Button("Analyze News")
+            out_txt, expl_txt = gr.Textbox(), gr.Textbox()
+            btn_txt.click(analyze_news, inputs=txt, outputs=[out_txt, expl_txt])

     with gr.Row():
-    img_btn.click(analyze_image, inputs=img_input, outputs=[img_result, img_explanation])
-    news_btn.click(analyze_news, inputs=news_input, outputs=[news_result, news_explanation])
-    video_btn.click(analyze_video, inputs=video_input, outputs=[video_result, video_explanation])
+        vid = gr.File(file_types=[".mp4"])
+        btn_vid = gr.Button("Analyze Video")
+        out_vid, expl_vid = gr.Textbox(), gr.Textbox()
+        btn_vid.click(analyze_video, inputs=vid, outputs=[out_vid, expl_vid])

 demo.launch()
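A few notes on this change follow.

The removed version scored images zero-shot with CLIP against the prompts "a real photo" and "an AI-generated image"; the lines that loaded clip_model and clip_processor did not survive in the diff above. A minimal sketch of that approach, assuming the widely used openai/clip-vit-base-patch32 checkpoint (the checkpoint the old code actually loaded is not recoverable from the diff):

from transformers import CLIPModel, CLIPProcessor

# Assumed checkpoint; the original loading lines are missing from the diff.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def clip_zero_shot(image):
    # Compare the image against both prompts and report the closer one,
    # mirroring the deleted analyze_image.
    inputs = clip_processor(
        text=["a real photo", "an AI-generated image"],
        images=image,
        return_tensors="pt",
        padding=True,
    )
    probs = clip_model(**inputs).logits_per_image.softmax(dim=1).tolist()[0]
    prediction = "AI-generated" if probs[1] > probs[0] else "Real"
    return prediction, round(max(probs) * 100, 2)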
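The new analyze_image hard-codes index 0 as the fake class, and analyze_news hard-codes LABEL_0 as fake news; both orderings come from each checkpoint's config, so reading them from config.id2label is safer than assuming. A hedged sketch (classify_image is an illustrative helper, and the label strings it returns depend on the model card):

import torch
from transformers import AutoImageProcessor, SiglipForImageClassification

model = SiglipForImageClassification.from_pretrained("prithivMLmods/open-deepfake-detection")
proc = AutoImageProcessor.from_pretrained("prithivMLmods/open-deepfake-detection")

def classify_image(image):
    inputs = proc(images=image, return_tensors="pt")
    with torch.no_grad():  # inference only; no gradients needed
        logits = model(**inputs).logits
    probs = logits.softmax(dim=1)[0]
    idx = int(probs.argmax())
    # Take the class name from the checkpoint instead of assuming index 0 is fake.
    return model.config.id2label[idx], round(float(probs[idx]) * 100, 2)

The same check applies to the text pipeline: printing news_model.model.config.id2label once confirms which of LABEL_0 and LABEL_1 actually means fake news.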
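analyze_video calls file.read(), but what gr.File hands the callback depends on the Gradio version: recent releases pass a filepath string by default, while older ones pass a tempfile-like object. A small normalization sketch (resolve_path and open_video are illustrative helpers, not part of the app):

import cv2

def resolve_path(file):
    # Assumption: gr.File yields either a filepath string (newer Gradio)
    # or a tempfile-like object with a .name attribute (older Gradio).
    return file if isinstance(file, str) else getattr(file, "name", "")

def open_video(file):
    cap = cv2.VideoCapture(resolve_path(file))
    if not cap.isOpened():
        raise ValueError("Could not open the uploaded video")
    return cap

With a path resolved this way, the tempfile.mkdtemp() copy inside analyze_video could be dropped entirely.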
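One edge case in the frame loop: fps = cap.get(cv2.CAP_PROP_FPS) or 1 only guards against an exact 0.0, so a file reporting an fps below 1 would make int(fps) zero and crash the modulo. A sampling sketch with that guard, keeping the one-frame-per-second, ten-second budget of the committed code (sample_frames is an illustrative helper):

import cv2
from PIL import Image

def sample_frames(path, seconds=10):
    # Yield roughly one PIL frame per second from the first `seconds` of video.
    cap = cv2.VideoCapture(path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 1.0
    step = max(int(round(fps)), 1)  # avoid modulo-by-zero when fps < 1
    idx = 0
    while True:
        ret, frame = cap.read()
        if not ret or idx / fps > seconds:
            break
        if idx % step == 0:
            yield Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        idx += 1
    cap.release()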