Upload 2 files

- app.py +47 -25
- dataset.py +1 -1

app.py CHANGED

@@ -102,18 +102,27 @@ def detect_on_gpu(dataset):
     return result
 
 # Modified audio deepfake detection main function
-def audio_deepfake_detection(demonstrations, query_audio_path):
-    demonstration_paths = [audio[0] for audio in demonstrations if audio[0] is not None]
-    demonstration_labels = [audio[1] for audio in demonstrations if audio[1] is not None]
-    if len(demonstration_paths) != len(demonstration_labels):
-        demonstration_labels = demonstration_labels[:len(demonstration_paths)]
-
-
-
-
+# def audio_deepfake_detection(demonstrations, query_audio_path):
+#     demonstration_paths = [audio[0] for audio in demonstrations if audio[0] is not None]
+#     demonstration_labels = [audio[1] for audio in demonstrations if audio[1] is not None]
+#     if len(demonstration_paths) != len(demonstration_labels):
+#         demonstration_labels = demonstration_labels[:len(demonstration_paths)]
+
+#     # Dataset processing
+#     audio_dataset = dataset.DemoDataset(demonstration_paths, demonstration_labels, query_audio_path)
+
+#     # Call the GPU detection function
+#     result = detect_on_gpu(audio_dataset)
+
+#     return {
+#         "Is AI Generated": result["is_fake"],
+#         "Confidence": f"{100*result['confidence']:.2f}%"
+#     }
+# 0 demonstrations
+def audio_deepfake_detection(query_audio_path):
 
     # Dataset processing
-    audio_dataset = dataset.DemoDataset(demonstration_paths, demonstration_labels, query_audio_path)
+    audio_dataset = dataset.DemoDataset([], [], query_audio_path)
 
     # Call the GPU detection function
     result = detect_on_gpu(audio_dataset)
@@ -125,28 +134,41 @@ def audio_deepfake_detection(demonstrations, query_audio_path):
 
 # Gradio interface
 def gradio_ui():
-    def detection_wrapper(demonstration_audio1, label1, demonstration_audio2, label2, demonstration_audio3, label3, query_audio):
-        demonstrations = [
-            (demonstration_audio1, label1),
-            (demonstration_audio2, label2),
-            (demonstration_audio3, label3),
-        ]
-        return audio_deepfake_detection(demonstrations,query_audio)
+    # def detection_wrapper(demonstration_audio1, label1, demonstration_audio2, label2, demonstration_audio3, label3, query_audio):
+    #     demonstrations = [
+    #         (demonstration_audio1, label1),
+    #         (demonstration_audio2, label2),
+    #         (demonstration_audio3, label3),
+    #     ]
+    #     return audio_deepfake_detection(demonstrations,query_audio)
+
+    # interface = gr.Interface(
+    #     fn=detection_wrapper,
+    #     inputs=[
+    #         gr.Audio(sources=["upload"], type="filepath", label="Demonstration Audio 1"),
+    #         gr.Dropdown(choices=["bonafide", "spoof"], value="bonafide", label="Label 1"),
+    #         gr.Audio(sources=["upload"], type="filepath", label="Demonstration Audio 2"),
+    #         gr.Dropdown(choices=["bonafide", "spoof"], value="bonafide", label="Label 2"),
+    #         gr.Audio(sources=["upload"], type="filepath", label="Demonstration Audio 3"),
+    #         gr.Dropdown(choices=["bonafide", "spoof"], value="bonafide", label="Label 3"),
+    #         gr.Audio(sources=["upload"], type="filepath", label="Query Audio (Audio for Detection)")
+    #     ],
+    #     outputs=gr.JSON(label="Detection Results"),
+    #     title="Audio Deepfake Detection System",
+    #     description="Upload demonstration audios and a query audio to detect whether the query is AI-generated.",
+    # )
+    # return interface
+
+    def detection_wrapper(query_audio):
+        return audio_deepfake_detection(query_audio)
 
     interface = gr.Interface(
         fn=detection_wrapper,
         inputs=[
-
-            # gr.Dropdown(choices=["bonafide", "spoof"], value="bonafide", label="Label 1"),
-            # gr.Audio(sources=["upload"], type="filepath", label="Demonstration Audio 2"),
-            # gr.Dropdown(choices=["bonafide", "spoof"], value="bonafide", label="Label 2"),
-            # gr.Audio(sources=["upload"], type="filepath", label="Demonstration Audio 3"),
-            # gr.Dropdown(choices=["bonafide", "spoof"], value="bonafide", label="Label 3"),
-            gr.Audio(sources=["upload"], type="filepath", label="Query Audio (Audio for Detection)")
+            gr.Audio(sources=["upload"], type="filepath", label="Test Audio")
         ],
         outputs=gr.JSON(label="Detection Results"),
         title="Audio Deepfake Detection System",
-        # description="Upload demonstration audios and a query audio to detect whether the query is AI-generated.",
         description="Upload a test audio to detect whether the audio is AI-generated.",
     )
     return interface
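
Note: the diff above leaves the Space with a single test-audio input. As a self-contained sketch of the same single-input gr.Interface pattern, the snippet below swaps the real GPU detector for a stub so it runs without the model or a ZeroGPU device; the stub function and its return values are placeholders, not part of this commit.

```python
# Sketch of the single-input interface built in gradio_ui() above.
# A stub stands in for detect_on_gpu(); its return values are placeholders.
import gradio as gr

def stub_detection_wrapper(query_audio):
    # The real wrapper calls audio_deepfake_detection(query_audio) instead.
    return {"Is AI Generated": False, "Confidence": "0.00%"}

interface = gr.Interface(
    fn=stub_detection_wrapper,
    inputs=[gr.Audio(sources=["upload"], type="filepath", label="Test Audio")],
    outputs=gr.JSON(label="Detection Results"),
    title="Audio Deepfake Detection System",
    description="Upload a test audio to detect whether the audio is AI-generated.",
)

if __name__ == "__main__":
    interface.launch()
```

In the actual app.py, fn is the detection_wrapper added above, which forwards the uploaded file path to audio_deepfake_detection and detect_on_gpu.
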
dataset.py CHANGED

@@ -6,7 +6,7 @@ import librosa
 import numpy as np
 
 class DemoDataset(Dataset):
-    def __init__(self, demonstration_paths, query_path, sample_rate=16000):
+    def __init__(self, demonstration_paths, demonstration_labels, query_path, sample_rate=16000):
         self.sample_rate = sample_rate
         self.query_path = query_path
 
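
For reference, DemoDataset now also accepts demonstration labels; a hedged sketch of both call patterns follows. The .wav paths and labels are placeholders and would need to point to real audio files on disk.

```python
# Sketch of the two ways the updated constructor can be called.
# "demo1.wav", "demo2.wav", and "query.wav" are placeholder paths.
from dataset import DemoDataset

# As app.py now does: a query audio with zero demonstrations.
query_only = DemoDataset([], [], "query.wav")

# The in-context variant kept (commented out) in app.py: paired demonstration
# paths and "bonafide"/"spoof" labels plus the query audio; sample_rate
# defaults to 16000.
with_demos = DemoDataset(
    ["demo1.wav", "demo2.wav"],
    ["bonafide", "spoof"],
    "query.wav",
)
```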