Commit 4743b44
Parent(s): a6546c1

ai detector new

Files changed:
- app.py (+351 −175)
- requirements.txt (+8 −8)
app.py
CHANGED
@@ -1,232 +1,384 @@
 import gradio as gr
 import torch
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import numpy as np
 from scipy import stats
 import re
 from collections import Counter
 import math

 class AdvancedAITextDetector:
     def __init__(self):
         """Initialize the AI Text Detector with multiple detection methods"""
-        self.model_name = "Hello-SimpleAI/chatgpt-detector-roberta"
-        try:
-            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)
-            self.model.eval()
-            self.model_loaded = True
-        except:
-            print("Warning: Could not load transformer model. Using statistical methods only.")
-            self.model_loaded = False

-    def calculate_perplexity_score(self, text):
-        """Calculate perplexity-based features"""
-        words = text.split()
-        if len(words) < 2:
-            return 0.5

-        # ...
-        ...
-        unique_bigrams = len(set(bigrams))
-        total_bigrams = len(bigrams)
-        ...

-    def ...:
-        """Calculate ..."""
-        ...

-    def ...:
-        """..."""
-        # ...
-        ...
-        return 0.5

-        ...
-        return diversity

-    def ...:
-        """..."""
         sentences = re.split(r'[.!?]+', text)
-        punct_variance = []
         for sentence in sentences:
             if sentence.strip():
-                ...

-        if ...:
             return 0.5

-        # ...
-        return 1 - min(...)

-    def ...:
-        """..."""
-        # Calculate various features
-        perplexity_score = self.calculate_perplexity_score(text)
-        burstiness = self.calculate_burstiness(text)
-        repetition = self.calculate_repetition_score(text)
-        vocab_diversity = self.calculate_vocabulary_diversity(text)
-        punct_patterns = self.calculate_punctuation_patterns(text)

-        # Weighted combination of features
-        # Lower perplexity, lower burstiness, higher repetition, lower diversity = more likely AI
-        ai_score = (
-            (1 - perplexity_score) * 0.2 +   # Low diversity in bigrams
-            (1 - burstiness) * 0.25 +        # Low burstiness
-            repetition * 0.2 +               # High repetition
-            (1 - vocab_diversity) * 0.2 +    # Low vocabulary diversity
-            punct_patterns * 0.15            # Regular punctuation
-        )

-        return ai_score, {
-            "burstiness": burstiness,
-            "repetition": repetition,
-            "vocab_diversity": vocab_diversity,
-            "punct_patterns": punct_patterns
-        }

-    def detect_ai_transformer(self, text):
-        """Use transformer model for AI detection"""
-        if not self.model_loaded:
-            return 0.5, "Model not loaded"

         try:
-            inputs = self.tokenizer(...)

             with torch.no_grad():
-                outputs = self.model(...)
-                ...

-            ...
-            return ai_probability
-        except ...:
-            return ...

     def detect(self, text):
-        """Main detection method"""
         if not text or len(text.strip()) < 20:
             return {
-                "ai_probability": 0,
                 "classification": "Undetermined",
                 "confidence": "Low",
                 "explanation": "Text too short for accurate analysis. Please provide at least 50 characters.",
                 "detailed_scores": {}
             }

-        ...

-        # Get ...
-        ...
         else:
-            final_score = ...

-        # ...
-        if final_score >= ...:
-            classification = "AI-Generated"
             confidence = "High"
-        elif final_score >= ...:
-            classification = "..."
-            confidence = "Medium"
-        elif final_score >= ...:
-            classification = "Uncertain"
             confidence = "Low"
-        elif final_score >= ...:
-            classification = "..."
             confidence = "Medium"
         else:
             classification = "Human-Written"
             confidence = "High"

-        # ...
-        explanation = self._generate_explanation(final_score, stat_details, ...)

         return {
             "ai_probability": round(final_score * 100, 2),
             "classification": classification,
             "confidence": confidence,
             "explanation": explanation,
-            "detailed_scores": stat_details
         }

-    def _generate_explanation(self, score, ...):
-        """Generate ..."""
         explanations = []

-        ...
         else:
-            explanations.append("...")

-        ...

         return " ".join(explanations)
@@ -239,35 +391,48 @@ def analyze_text(text):

     # Format output for Gradio
     output = f"""
-## Detection Result

 **Classification:** {result['classification']}
 **AI Probability:** {result['ai_probability']}%
 **Confidence Level:** {result['confidence']}

-### Analysis Details
 {result['explanation']}

-### ...
 """

     if result['detailed_scores']:
         for metric, value in result['detailed_scores'].items():
             metric_name = metric.replace('_', ' ').title()
-            ...

-    # Create ...
     ai_prob = result['ai_probability']
     human_prob = 100 - ai_prob

     bar_chart = f"""
-### Probability Distribution
 ```
 AI-Generated: {'█' * int(ai_prob/5)}{'░' * (20-int(ai_prob/5))} {ai_prob}%
 Human-Written: {'█' * int(human_prob/5)}{'░' * (20-int(human_prob/5))} {human_prob}%
 ```
 """

     return output + bar_chart

 # Create Gradio interface
@@ -279,19 +444,30 @@ interface = gr.Interface(
         label="Input Text"
     ),
     outputs=gr.Markdown(label="Analysis Result"),
-    title="🔍 Advanced AI Text Detector",
     description="""
-    This ...

-    - **...

-    ...
     """,
     examples=[
-        ["The impact of artificial intelligence on modern society ..."],
-        ["So I was walking down the street yesterday, right? And this crazy thing happened - I mean, you won't believe it. There was this dog, just a regular golden retriever, but it was wearing these ridiculous sunglasses. Like, who puts sunglasses on a dog? Anyway, the owner was this old lady, must've been like 80 or something, and she was just chatting away on her phone, completely oblivious. The dog looked so confused! I couldn't help but laugh. Sometimes you see the weirdest stuff when you're just out and about, you know?"]
     ],
     theme=gr.themes.Soft(),
     analytics_enabled=False
 import gradio as gr
 import torch
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, GPT2LMHeadModel, GPT2TokenizerFast
 import numpy as np
 from scipy import stats
 import re
 from collections import Counter
 import math
+import warnings
+warnings.filterwarnings('ignore')

 class AdvancedAITextDetector:
     def __init__(self):
         """Initialize the AI Text Detector with multiple detection methods"""
+        self.models_loaded = {}

+        # Load multiple models for ensemble detection
+        self.load_models()

+    def load_models(self):
+        """Load multiple detection models for ensemble approach"""
+        try:
+            # Model 1: RoBERTa-based detector (more accurate)
+            self.roberta_tokenizer = AutoTokenizer.from_pretrained("roberta-base-openai-detector")
+            self.roberta_model = AutoModelForSequenceClassification.from_pretrained("roberta-base-openai-detector")
+            self.roberta_model.eval()
+            self.models_loaded['roberta'] = True
+        except:
+            print("Warning: Could not load RoBERTa detector")
+            self.models_loaded['roberta'] = False

+        try:
+            # Model 2: Alternative detector
+            self.alt_tokenizer = AutoTokenizer.from_pretrained("Hello-SimpleAI/chatgpt-detector-roberta")
+            self.alt_model = AutoModelForSequenceClassification.from_pretrained("Hello-SimpleAI/chatgpt-detector-roberta")
+            self.alt_model.eval()
+            self.models_loaded['alt'] = True
+        except:
+            print("Warning: Could not load alternative detector")
+            self.models_loaded['alt'] = False

+        try:
+            # GPT-2 for perplexity calculation
+            self.gpt2_tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
+            self.gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
+            self.gpt2_model.eval()
+            self.models_loaded['gpt2'] = True
+        except:
+            print("Warning: Could not load GPT-2 for perplexity")
+            self.models_loaded['gpt2'] = False

+    def calculate_gpt2_perplexity(self, text):
+        """Calculate perplexity using GPT-2 - lower perplexity suggests AI text"""
+        if not self.models_loaded.get('gpt2', False):
+            return None

+        try:
+            encodings = self.gpt2_tokenizer(text, return_tensors='pt', truncation=True, max_length=512)
+            max_length = encodings.input_ids.size(1)

+            with torch.no_grad():
+                outputs = self.gpt2_model(**encodings, labels=encodings.input_ids)
+                loss = outputs.loss
+                perplexity = torch.exp(loss).item()

+            # Normalize perplexity to 0-1 scale (lower perplexity = more likely AI)
+            # Typical human text: 20-60, AI text: 10-30
+            normalized = 1 - min(max((perplexity - 10) / 50, 0), 1)
+            return normalized
+        except:
+            return None

+    def detect_chatgpt_patterns(self, text):
+        """Detect specific ChatGPT writing patterns"""
+        patterns_score = 0
+        pattern_count = 0

+        # ChatGPT often uses these phrases
+        chatgpt_phrases = [
+            r'\bI understand\b',
+            r'\bIt\'s important to note\b',
+            r'\bIt\'s worth noting\b',
+            r'\bIn conclusion\b',
+            r'\bHowever,\s',
+            r'\bMoreover,\s',
+            r'\bFurthermore,\s',
+            r'\bAdditionally,\s',
+            r'\bIn summary\b',
+            r'\bTo summarize\b',
+            r'\boverall,\s',
+            r'\bGenerally speaking\b',
+            r'\bTypically,\s',
+            r'\bEssentially,\s',
+            r'\bFundamentally,\s',
+            r'\bIt\'s crucial\b',
+            r'\bIt\'s essential\b',
+            r'\bRemember that\b',
+            r'\bKeep in mind\b',
+            r'\bThis means that\b',
+            r'\bThis suggests that\b',
+            r'\bwhich means\b',
+            r'\bthat being said\b',
+            r'\bon the other hand\b',
+        ]

+        text_lower = text.lower()
+        for pattern in chatgpt_phrases:
+            if re.search(pattern.lower(), text_lower):
+                pattern_count += 1

+        # Calculate pattern density
+        patterns_score = min(pattern_count / 5, 1.0)  # Normalize to 0-1

+        # Check for numbered or bulleted lists (common in ChatGPT)
+        has_numbered_list = bool(re.search(r'\n\d+\.', text))
+        has_bullets = bool(re.search(r'\n[-•*]\s', text))

+        if has_numbered_list or has_bullets:
+            patterns_score = min(patterns_score + 0.2, 1.0)

+        # Check for balanced paragraph structure (AI characteristic)
+        paragraphs = text.split('\n\n')
+        if len(paragraphs) > 2:
+            lengths = [len(p.split()) for p in paragraphs if p.strip()]
+            if lengths:
+                cv = np.std(lengths) / np.mean(lengths) if np.mean(lengths) > 0 else 1
+                if cv < 0.3:  # Low variation in paragraph lengths
+                    patterns_score = min(patterns_score + 0.15, 1.0)

+        return patterns_score

+    def calculate_sentence_complexity_variance(self, text):
+        """Calculate variance in sentence complexity - AI text is more uniform"""
         sentences = re.split(r'[.!?]+', text)
+        complexities = []

         for sentence in sentences:
             if sentence.strip():
+                words = sentence.split()
+                if len(words) > 0:
+                    # Calculate complexity based on word length and sentence length
+                    avg_word_length = np.mean([len(w) for w in words])
+                    complexity = len(words) * (avg_word_length / 5)
+                    complexities.append(complexity)

+        if len(complexities) < 2:
             return 0.5

+        # Lower variance suggests AI (more uniform complexity)
+        cv = np.std(complexities) / np.mean(complexities) if np.mean(complexities) > 0 else 0
+        return 1 - min(cv / 0.5, 1.0)  # Normalize and invert

+    def calculate_word_frequency_distribution(self, text):
+        """Analyze word frequency distribution - AI text follows Zipf's law more closely"""
+        words = re.findall(r'\b\w+\b', text.lower())
+        word_freq = Counter(words)

+        if len(word_freq) < 10:
+            return 0.5

+        frequencies = sorted(word_freq.values(), reverse=True)[:50]  # Top 50 words
+        ranks = range(1, len(frequencies) + 1)

+        # Calculate how well it fits Zipf's law (AI text fits better)
+        if len(frequencies) > 1:
+            log_ranks = np.log(ranks)
+            log_freqs = np.log(frequencies)

+            # Calculate correlation with Zipf's law
+            correlation = abs(np.corrcoef(log_ranks, log_freqs)[0, 1])

+            # Higher correlation suggests AI
+            return correlation

+        return 0.5

+    def detect_roberta(self, text):
+        """Use RoBERTa OpenAI detector"""
+        if not self.models_loaded.get('roberta', False):
+            return None

         try:
+            inputs = self.roberta_tokenizer(text, return_tensors="pt", truncation=True,
+                                            max_length=512, padding=True)

             with torch.no_grad():
+                outputs = self.roberta_model(**inputs)
+                predictions = torch.softmax(outputs.logits, dim=-1)

+            # Class 0 is "Real", Class 1 is "Fake" for this model
+            ai_probability = predictions[0][1].item()

+            return ai_probability
+        except:
+            return None

+    def detect_alternative(self, text):
+        """Use alternative detector model"""
+        if not self.models_loaded.get('alt', False):
+            return None

+        try:
+            inputs = self.alt_tokenizer(text, return_tensors="pt", truncation=True,
+                                        max_length=512, padding=True)

+            with torch.no_grad():
+                outputs = self.alt_model(**inputs)
+                predictions = torch.softmax(outputs.logits, dim=-1)
+                ai_probability = predictions[0][1].item()

+            return ai_probability
+        except:
+            return None

+    def enhanced_statistical_analysis(self, text):
+        """Enhanced statistical analysis specifically tuned for ChatGPT detection"""

+        # Calculate all metrics
+        chatgpt_patterns = self.detect_chatgpt_patterns(text)
+        sentence_complexity = self.calculate_sentence_complexity_variance(text)
+        word_freq_dist = self.calculate_word_frequency_distribution(text)

+        # Existing metrics with adjusted weights
+        words = text.split()
+        sentences = re.split(r'[.!?]+', text)

+        # Sentence length consistency (AI is more consistent)
+        sentence_lengths = [len(s.split()) for s in sentences if s.strip()]
+        if len(sentence_lengths) > 1:
+            cv_sentence = np.std(sentence_lengths) / np.mean(sentence_lengths)
+            sentence_consistency = 1 - min(cv_sentence / 0.5, 1.0)
+        else:
+            sentence_consistency = 0.5

+        # Vocabulary repetition rate
+        word_counts = Counter(words)
+        words_used_once = sum(1 for count in word_counts.values() if count == 1)
+        repetition_rate = 1 - (words_used_once / len(words)) if words else 0.5

+        # Conjunction and transition word density
+        transitions = ['however', 'therefore', 'moreover', 'furthermore', 'additionally',
+                       'consequently', 'nevertheless', 'nonetheless', 'meanwhile', 'subsequently']
+        transition_count = sum(1 for w in words if w.lower() in transitions)
+        transition_density = min(transition_count / len(words) * 100, 1.0) if words else 0

+        # Combine all statistical features with optimized weights
+        statistical_score = (
+            chatgpt_patterns * 0.35 +        # Strongest indicator
+            sentence_complexity * 0.20 +     # Uniform complexity
+            word_freq_dist * 0.15 +          # Zipf's law adherence
+            sentence_consistency * 0.15 +    # Consistent sentence lengths
+            repetition_rate * 0.10 +         # Word repetition
+            transition_density * 0.05        # Transition word usage
+        )

+        return statistical_score, {
+            'chatgpt_patterns': chatgpt_patterns,
+            'sentence_uniformity': sentence_complexity,
+            'zipf_correlation': word_freq_dist,
+            'sentence_consistency': sentence_consistency,
+            'repetition_rate': repetition_rate,
+            'transition_density': transition_density
+        }

     def detect(self, text):
+        """Main detection method with ensemble approach"""
         if not text or len(text.strip()) < 20:
             return {
+                "ai_probability": 50.0,
                 "classification": "Undetermined",
                 "confidence": "Low",
                 "explanation": "Text too short for accurate analysis. Please provide at least 50 characters.",
                 "detailed_scores": {}
             }

+        scores = []
+        weights = []

+        # Get RoBERTa OpenAI detector score (most accurate for ChatGPT)
+        roberta_score = self.detect_roberta(text)
+        if roberta_score is not None:
+            scores.append(roberta_score)
+            weights.append(0.4)  # Highest weight for most accurate model

+        # Get alternative model score
+        alt_score = self.detect_alternative(text)
+        if alt_score is not None:
+            scores.append(alt_score)
+            weights.append(0.2)

+        # Get GPT-2 perplexity score
+        perplexity_score = self.calculate_gpt2_perplexity(text)
+        if perplexity_score is not None:
+            scores.append(perplexity_score)
+            weights.append(0.15)

+        # Get enhanced statistical analysis
+        stat_score, stat_details = self.enhanced_statistical_analysis(text)
+        scores.append(stat_score)
+        weights.append(0.25 if len(scores) == 1 else 0.25)

+        # Calculate weighted average
+        if scores:
+            # Normalize weights
+            weights = [w / sum(weights) for w in weights]
+            final_score = sum(s * w for s, w in zip(scores, weights))
         else:
+            final_score = 0.5

+        # Adjust classification thresholds for better ChatGPT detection
+        if final_score >= 0.75:
+            classification = "AI-Generated (Likely ChatGPT)"
             confidence = "High"
+        elif final_score >= 0.55:
+            classification = "Probably AI-Generated"
+            confidence = "Medium-High"
+        elif final_score >= 0.45:
+            classification = "Uncertain (Mixed Signals)"
             confidence = "Low"
+        elif final_score >= 0.25:
+            classification = "Probably Human-Written"
             confidence = "Medium"
         else:
             classification = "Human-Written"
             confidence = "High"

+        # Generate detailed explanation
+        explanation = self._generate_explanation(final_score, stat_details, {
+            'roberta': roberta_score,
+            'alternative': alt_score,
+            'perplexity': perplexity_score
+        })

         return {
             "ai_probability": round(final_score * 100, 2),
             "classification": classification,
             "confidence": confidence,
             "explanation": explanation,
+            "detailed_scores": stat_details,
+            "model_scores": {
+                'roberta_openai': roberta_score,
+                'alternative': alt_score,
+                'perplexity': perplexity_score,
+                'statistical': stat_score
+            }
         }

+    def _generate_explanation(self, score, stat_details, model_scores):
+        """Generate detailed explanation of the detection result"""
         explanations = []

+        # Overall assessment
+        if score >= 0.75:
+            explanations.append("🤖 Strong indicators of AI generation detected, consistent with ChatGPT patterns.")
+        elif score >= 0.55:
+            explanations.append("⚠️ Multiple AI characteristics detected, suggesting probable AI generation.")
+        elif score >= 0.45:
+            explanations.append("❓ Mixed characteristics - could be AI-assisted or heavily edited human text.")
+        elif score >= 0.25:
+            explanations.append("✍️ Predominantly human characteristics with some regularities.")
         else:
+            explanations.append("👤 Strong human writing characteristics detected.")

+        # Model-specific insights
+        if model_scores.get('roberta') is not None:
+            if model_scores['roberta'] > 0.7:
+                explanations.append("\n• OpenAI detector: Strong AI signature")
+            elif model_scores['roberta'] < 0.3:
+                explanations.append("\n• OpenAI detector: Strong human signature")

+        # Pattern analysis
+        if stat_details.get('chatgpt_patterns', 0) > 0.5:
+            explanations.append("\n• High density of ChatGPT-typical phrases and structures")

+        if stat_details.get('sentence_uniformity', 0) > 0.7:
+            explanations.append("\n• Unusually uniform sentence complexity (AI characteristic)")
+        elif stat_details.get('sentence_uniformity', 0) < 0.3:
+            explanations.append("\n• Variable sentence complexity (human characteristic)")

+        if stat_details.get('zipf_correlation', 0) > 0.8:
+            explanations.append("\n• Word frequency distribution closely follows Zipf's law (AI-like)")

         return " ".join(explanations)

     # Format output for Gradio
     output = f"""
+## 🔍 Detection Result

 **Classification:** {result['classification']}
 **AI Probability:** {result['ai_probability']}%
 **Confidence Level:** {result['confidence']}

+### 📝 Analysis Details
 {result['explanation']}

+### 📊 Model Scores
 """

+    if result.get('model_scores'):
+        for model, score in result['model_scores'].items():
+            if score is not None:
+                model_name = model.replace('_', ' ').title()
+                output += f"- {model_name}: {round(score * 100, 2)}%\n"

+    output += "\n### 🔬 Statistical Metrics\n"

     if result['detailed_scores']:
         for metric, value in result['detailed_scores'].items():
             metric_name = metric.replace('_', ' ').title()
+            percentage = round(value * 100, 1)
+            output += f"- {metric_name}: {percentage}%\n"

+    # Create visual probability bar
     ai_prob = result['ai_probability']
     human_prob = 100 - ai_prob

     bar_chart = f"""
+### 📊 Probability Distribution
 ```
 AI-Generated: {'█' * int(ai_prob/5)}{'░' * (20-int(ai_prob/5))} {ai_prob}%
 Human-Written: {'█' * int(human_prob/5)}{'░' * (20-int(human_prob/5))} {human_prob}%
 ```
 """

+    # Add warning for edge cases
+    if result['confidence'] == "Low":
+        bar_chart += "\n⚠️ **Note:** Low confidence - results may be unreliable. Consider additional verification."

     return output + bar_chart

 # Create Gradio interface
         label="Input Text"
     ),
     outputs=gr.Markdown(label="Analysis Result"),
+    title="🔍 Advanced ChatGPT & AI Text Detector",
     description="""
+    This enhanced AI text detector uses state-of-the-art techniques specifically optimized for detecting ChatGPT and similar AI-generated content:
+
+    ### 🎯 Key Features:
+    - **Multiple AI Detection Models** including OpenAI's RoBERTa detector
+    - **GPT-2 Perplexity Analysis** to measure text predictability
+    - **ChatGPT Pattern Recognition** detecting characteristic phrases and structures
+    - **Advanced Statistical Analysis** including Zipf's law correlation and sentence uniformity
+    - **Ensemble Method** combining multiple approaches for maximum accuracy
+
+    ### 💡 Usage Tips:
+    - Provide at least 100 words for best results
+    - The detector is specifically tuned for ChatGPT/GPT-4 content
+    - Works best with English text
+    - Longer texts generally yield more reliable results

+    ### ⚠️ Important:
+    This tool provides probabilistic analysis, not absolute certainty. Use it as one of multiple factors in your assessment.
     """,
     examples=[
+        ["The impact of artificial intelligence on modern society is profound and multifaceted. As we navigate this technological revolution, it's important to consider both the opportunities and challenges that AI presents. On one hand, AI systems are enhancing productivity, improving healthcare outcomes, and enabling new forms of creativity. On the other hand, concerns about job displacement, privacy, and algorithmic bias require careful consideration. Moving forward, it will be crucial for policymakers, technologists, and society as a whole to work together in shaping the development and deployment of AI in ways that benefit humanity while mitigating potential risks."],
+        ["So I was walking down the street yesterday, right? And this crazy thing happened - I mean, you won't believe it. There was this dog, just a regular golden retriever, but it was wearing these ridiculous sunglasses. Like, who puts sunglasses on a dog? Anyway, the owner was this old lady, must've been like 80 or something, and she was just chatting away on her phone, completely oblivious. The dog looked so confused! I couldn't help but laugh. Sometimes you see the weirdest stuff when you're just out and about, you know? Made my whole day, honestly. Still cracks me up thinking about it."],
+        ["Machine learning has revolutionized data analysis. Furthermore, deep learning algorithms have shown remarkable success in computer vision tasks. Additionally, natural language processing has made significant strides. It's worth noting that transformer architectures have been particularly influential. Moreover, these developments have practical applications across industries. In conclusion, the continued advancement of ML techniques promises further innovations."]
     ],
     theme=gr.themes.Soft(),
     analytics_enabled=False
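
For reference, a minimal sketch of how the new ensemble class might be exercised outside the Gradio UI (an assumption-laden example, not part of the commit: it presumes the class definition above has been loaded, e.g. in a REPL, and that the first instantiation can download the model weights):

```python
# Hypothetical smoke test for the new ensemble detector (not part of the commit).
detector = AdvancedAITextDetector()  # __init__ calls load_models(): RoBERTa, alt detector, GPT-2

sample = (
    "It's important to note that artificial intelligence has transformed many industries. "
    "Furthermore, these changes are likely to continue. In conclusion, adaptation is essential."
)
result = detector.detect(sample)

print(result["classification"], f"{result['ai_probability']}%")
print(result["model_scores"])     # per-model probabilities; None where a model failed to load
print(result["detailed_scores"])  # statistical metrics (chatgpt_patterns, zipf_correlation, ...)
```

Because each model is loaded in its own try/except and scored only when available, the same call degrades gracefully to the statistical analysis alone when no transformer model can be fetched.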
requirements.txt
CHANGED
@@ -1,8 +1,8 @@
-gradio
-torch
-transformers
-...
+gradio==4.44.0
+torch==2.1.0
+transformers==4.35.0
+scipy==1.11.4
+numpy==1.24.3
+huggingface-hub==0.19.4
+sentencepiece==0.1.99
+protobuf==3.20.3
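
Since every dependency is now pinned, a quick check that an environment built with `pip install -r requirements.txt` actually matches the pins might look like this (a hedged sketch; distribution names as listed above):

```python
# Hypothetical environment check (not part of the commit).
from importlib.metadata import version

pinned = {
    "gradio": "4.44.0",
    "torch": "2.1.0",
    "transformers": "4.35.0",
    "scipy": "1.11.4",
    "numpy": "1.24.3",
    "huggingface-hub": "0.19.4",
    "sentencepiece": "0.1.99",
    "protobuf": "3.20.3",
}
for name, want in pinned.items():
    got = version(name)  # raises PackageNotFoundError if the package is missing
    assert got == want, f"{name}: expected {want}, found {got}"
print("All pinned versions match.")
```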