Commit 7551cd5: update app
1 Parent(s): 028a426
Space build status: Build error

Files changed:
- app.py +16 -14
- examples/03bSnISJMiM_1.mp4 +0 -0
- examples/03bSnISJMiM_5.mp4 +0 -0
- requirements.txt +2 -1
app.py
CHANGED
@@ -29,10 +29,7 @@ state_dict = torch.load(ckpts[0], map_location=torch.device(device))['state_dict']
 net = Model_LA(args, len(token_to_ix), pretrained_emb).to(device)
 net.load_state_dict(state_dict)
 
-
-
 def inference(video_path, text):
-
     # data preprocessing
     # text
     def clean(w):
@@ -54,9 +51,18 @@ def inference(video_path, text):
     A = pad_feature(mel, a_max_len)
     V = pad_feature(mel, v_max_len)
     # print shapes
-    print("Processed text shape
-    print("Processed audio shape
-    print("Processed video shape
+    print(f"Processed text shape from {len(s)} to {L.shape}")
+    print(f"Processed audio shape from {mel.shape} to {A.shape}")
+    print(f"Processed video shape from {mel.shape} to {V.shape}")
+
+    net.train(False)
+    x = np.expand_dims(L,axis=0)
+    y = np.expand_dims(A,axis=0)
+    z = np.expand_dims(V,axis=0)
+    x, y, z = torch.from_numpy(x).to(device), torch.from_numpy(y).to(device), torch.from_numpy(z).float().to(device)
+    pred = net(x, y, z).cpu().data.numpy()
+    label_to_ix = ['happy', 'sad', 'angry', 'fear', 'disgust', 'surprise']
+    result_dict = dict(zip(label_to_ix, pred[0]))
     return out
 
 
@@ -69,18 +75,14 @@ description="This is a demo implementation of EfficientNetV2 Deepfakes Image Det
 "
 
 examples = [
-    ['
-    ['
-    ['Video3-fake-3-ff.mp4'],
-    ['Video8-real-3-ff.mp4'],
-    ['real-1.mp4'],
-    ['fake-1.mp4'],
+    ['examples/03bSnISJMiM_1.mp4', "IT WAS REALLY GOOD "],
+    ['examples/03bSnISJMiM_5.mp4', "AND THEY SHOULDVE I GUESS "],
 ]
 
 gr.Interface(inference,
     inputs = ["video", "text"],
-    outputs=["
+    outputs=["label"],
     title=title,
     description=description,
     examples=examples
-    ).launch()
+    ).launch(debug=True)
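Note on the diff above: the new code builds result_dict, a mapping from the six emotion labels to the model's scores, while the function still ends with "return out". For reference, below is a minimal standalone sketch (an assumption-labelled stand-in, not the Space's app.py) of the return shape that Gradio's "label" output consumes from a (video, text) function. The label list and the "video"/"text"/"label" wiring are taken from the diff; the scoring function is a placeholder for the Model_LA forward pass.

# Hedged sketch, not the committed app.py: it shows the {label: confidence}
# dict that Gradio's "label" output renders, using a stand-in for the
# Model_LA forward pass. Labels and the video/text inputs come from the diff.
import numpy as np
import gradio as gr

LABELS = ['happy', 'sad', 'angry', 'fear', 'disgust', 'surprise']

def inference(video_path, text):
    # Stand-in scores; the real app computes pred = net(x, y, z) here.
    scores = np.random.dirichlet(np.ones(len(LABELS)))
    # Returning {label: confidence} is the shape outputs=["label"] expects.
    return dict(zip(LABELS, scores.tolist()))

gr.Interface(inference,
             inputs=["video", "text"],
             outputs=["label"]).launch()

Run as a script, this launches a local demo that displays the returned dict as per-class confidences.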
examples/03bSnISJMiM_1.mp4
ADDED
Binary file (193 kB).

examples/03bSnISJMiM_5.mp4
ADDED
Binary file (62.1 kB).
requirements.txt
CHANGED
@@ -1 +1,2 @@
-https://github.com/explosion/spacy-models/releases/download/en_vectors_web_lg-2.1.0/en_vectors_web_lg-2.1.0.tar.gz
+https://github.com/explosion/spacy-models/releases/download/en_vectors_web_lg-2.1.0/en_vectors_web_lg-2.1.0.tar.gz
+torch==1.9.1
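A minimal environment check, assuming the runtime installs exactly what requirements.txt pins (the check itself is not part of the commit): confirm the torch version before app.py calls torch.load on the checkpoint.

# Assumption-labelled check, not part of the commit: verify the installed
# torch matches the pin added in requirements.txt before loading weights.
import torch

assert torch.__version__.startswith("1.9.1"), f"unexpected torch {torch.__version__}"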