Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -38,7 +38,7 @@ def detect_on_gpu(dataset):
     print(f"正在加载模型权重: {checkpoint_path}")
     checkpoint = torch.load(checkpoint_path, map_location=device)
     model_state_dict = checkpoint['model_state_dict']
-    threshold = 0.
+    threshold = 0.8
     print(f"检测阈值设置为: {threshold}")
 
     # 处理模型状态字典的 key
@@ -126,10 +126,12 @@ def audio_deepfake_detection(query_audio_path):
 
     # 调用 GPU 检测函数
     result = detect_on_gpu(audio_dataset)
+    is_fake = "是/Yes" if result["is_fake"] else "否/No"
+    confidence = f"{100*result['confidence']:.2f}%" if result["is_fake"] else f"{100*(1-result['confidence']):.2f}%"
 
     return {
-        "Is AI Generated":
-        "Confidence":
+        "是否为AI生成/Is AI Generated": is_fake,
+        "检测可信度/Confidence": confidence
     }
 
 # Gradio 界面
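For context, a minimal sketch of how the two edits fit together: detect_on_gpu presumably compares the model's score against the new 0.8 threshold, and audio_deepfake_detection then renders the bilingual labels added in this commit. The helper name format_detection_result and the score-to-decision line are illustrative assumptions; only the formatting expressions come from the diff above.

# Sketch only, not the Space's actual code: assumes detect_on_gpu returns a
# dict with an "is_fake" flag and a raw "confidence" score in [0, 1].

def format_detection_result(result: dict) -> dict:
    # Mirrors the formatting added in this commit.
    is_fake = "是/Yes" if result["is_fake"] else "否/No"
    confidence = (
        f"{100 * result['confidence']:.2f}%"
        if result["is_fake"]
        else f"{100 * (1 - result['confidence']):.2f}%"
    )
    return {
        "是否为AI生成/Is AI Generated": is_fake,
        "检测可信度/Confidence": confidence,
    }

# Hypothetical score-to-decision step; the diff only shows threshold = 0.8,
# not how detect_on_gpu applies it internally.
score = 0.91  # assumed model output probability that the audio is fake
result = {"is_fake": score >= 0.8, "confidence": score}
print(format_detection_result(result))
# {'是否为AI生成/Is AI Generated': '是/Yes', '检测可信度/Confidence': '91.00%'}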