GVAmaresh committed
Commit 1a7861d · 1 Parent(s): c666a1c

dev check working

Files changed (1): app.py (+143 -0)
app.py CHANGED
@@ -6,3 +6,146 @@ app = FastAPI()
 @app.get("/")
 def greet_json():
     return {"Hello": "World!"}
+
+
+ #--------------------------------------------------------------------------------------------------------------------
+
+ import os
+ import numpy as np
+ import tensorflow as tf
+ import librosa
+ import librosa.display  # required for librosa.display.specshow below
+ import matplotlib.pyplot as plt
+ # import gradio as gr
+
+ os.environ["TORCH_HOME"] = "/tmp/torch_cache"
+
+ from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
+ from tensorflow.keras.optimizers import Adam
+ from transformers import pipeline
+
+ class UnifiedDeepfakeDetector:
+     def __init__(self):
+         self.input_shape = (224, 224, 3)
+         self.vgg_model = self.build_vgg16_model()
+         self.dense_model = tf.keras.models.load_model('deepfake_detection_model.h5')
+         self.cnn_model = tf.keras.models.load_model('audio_deepfake_detection_model_cnn.h5')
+         self.melody_machine = pipeline(model="MelodyMachine/Deepfake-audio-detection-V2")
+
+     def build_vgg16_model(self):
+         # Frozen ImageNet VGG16 backbone with a small binary classification head.
+         base_model = VGG16(weights='imagenet', include_top=False, input_shape=self.input_shape)
+         for layer in base_model.layers:
+             layer.trainable = False
+
+         x = base_model.output
+         x = GlobalAveragePooling2D()(x)
+         x = Dense(512, activation='relu')(x)
+         x = Dropout(0.5)(x)
+         x = Dense(256, activation='relu')(x)
+         x = Dropout(0.3)(x)
+         output = Dense(1, activation='sigmoid')(x)
+
+         model = Model(inputs=base_model.input, outputs=output)
+         model.compile(optimizer=Adam(learning_rate=0.0001),
+                       loss='binary_crossentropy',
+                       metrics=['accuracy'])
+         return model
+
+     def audio_to_spectrogram(self, file_path, plot=False):
+         try:
+             audio, sr = librosa.load(file_path, duration=5.0, sr=22050)
+             spectrogram = librosa.feature.melspectrogram(y=audio, sr=sr, n_mels=224, fmax=8000)
+             spectrogram_db = librosa.power_to_db(spectrogram, ref=np.max)
+
+             if plot:
+                 plt.figure(figsize=(12, 6))
+                 librosa.display.specshow(spectrogram_db, y_axis='mel', x_axis='time', cmap='viridis')
+                 plt.colorbar(format='%+2.0f dB')
+                 plt.title('Mel Spectrogram Analysis')
+                 plot_path = 'spectrogram_plot.png'
+                 plt.savefig(plot_path, dpi=300, bbox_inches='tight')
+                 plt.close()
+                 return plot_path
+
+             # Normalize to [0, 1], replicate to 3 channels, resize to the VGG16 input size.
+             spectrogram_norm = (spectrogram_db - spectrogram_db.min()) / (spectrogram_db.max() - spectrogram_db.min())
+             spectrogram_rgb = np.stack([spectrogram_norm] * 3, axis=-1)
+             spectrogram_resized = tf.image.resize(spectrogram_rgb, (224, 224))
+             return preprocess_input(spectrogram_resized * 255)
+
+         except Exception as e:
+             print(f"Spectrogram error: {e}")
+             return None
+
+     def analyze_audio_rf(self, audio_path, model_choice="all"):
+         results = {}
+         plots = {}
+         r = []
+         audio_features = {}
+
+         try:
+             # Load audio and extract basic features
+             audio, sr = librosa.load(audio_path, res_type="kaiser_fast")
+             audio_features = {
+                 "sample_rate": sr,
+                 "duration": librosa.get_duration(y=audio, sr=sr),
+                 "rms_energy": float(np.mean(librosa.feature.rms(y=audio))),
+                 "zero_crossing_rate": float(np.mean(librosa.feature.zero_crossing_rate(y=audio)))
+             }
+
+             # VGG16 Analysis
+             if model_choice in ["VGG16", "all"]:
+                 spec = self.audio_to_spectrogram(audio_path)
+                 if spec is not None:
+                     pred = self.vgg_model.predict(np.expand_dims(spec, axis=0))[0][0]
+                     results["VGG16"] = {
+                         "prediction": "FAKE" if pred > 0.5 else "REAL",
+                         "confidence": float(pred if pred > 0.5 else 1 - pred),
+                         "raw_score": float(pred)
+                     }
+                     plots["spectrogram"] = self.audio_to_spectrogram(audio_path, plot=True)
+                     r.append("FAKE" if pred > 0.5 else "REAL")
+
+             # Dense Model Analysis
+             if model_choice in ["Dense", "all"]:
+                 mfcc = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=40)
+                 mfcc_scaled = np.mean(mfcc.T, axis=0).reshape(1, -1)
+                 pred = self.dense_model.predict(mfcc_scaled)
+                 results["Dense"] = {
+                     "prediction": "FAKE" if np.argmax(pred[0]) == 0 else "REAL",
+                     "confidence": float(np.max(pred[0])),
+                     "raw_scores": pred[0].tolist()
+                 }
+                 r.append("FAKE" if np.argmax(pred[0]) == 0 else "REAL")
+
+             # CNN Model Analysis
+             if model_choice in ["CNN", "all"]:
+                 mfcc = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=40)
+                 mfcc_scaled = np.mean(mfcc.T, axis=0).reshape(1, 40, 1, 1)
+                 pred = self.cnn_model.predict(mfcc_scaled)
+                 results["CNN"] = {
+                     "prediction": "FAKE" if np.argmax(pred[0]) == 0 else "REAL",
+                     "confidence": float(np.max(pred[0])),
+                     "raw_scores": pred[0].tolist()
+                 }
+                 r.append("FAKE" if np.argmax(pred[0]) == 0 else "REAL")
+
+             # Melody Machine Analysis
+             if model_choice in ["MelodyMachine", "all"]:
+                 result = self.melody_machine(audio_path)
+                 best_pred = max(result, key=lambda x: x['score'])
+                 results["MelodyMachine"] = {
+                     "prediction": best_pred['label'].upper(),
+                     "confidence": float(best_pred['score']),
+                     "all_predictions": result
+                 }
+                 r.append(best_pred['label'].upper())
+
+             return r
+
+         except Exception as e:
+             print(f"Analysis error: {e}")
+             return None  # mirror the success path's single return value