Kieran Fraser committed · Commit 4ae31ed · Parent(s): 7dd25bd

Updated poisoning attack with clean prediction gallery

Signed-off-by: Kieran Fraser <[email protected]>
app.py
CHANGED
@@ -11,10 +11,7 @@ import gradio as gr
 import numpy as np
 from carbon_theme import Carbon
 
-import os
-
 import numpy as np
-import matplotlib.pyplot as plt
 import torch
 import transformers
 
@@ -164,6 +161,7 @@ def clf_poison_evaluate(*args):
     target_class = args[3]
     data_type = args[-1]
 
+
     if model_type == "Example":
         model = transformers.AutoModelForImageClassification.from_pretrained(
             'facebook/deit-tiny-distilled-patch16-224',
@@ -235,6 +233,7 @@ def clf_poison_evaluate(*args):
         )
         backdoor = PoisoningAttackBackdoor(poison_func)
         source_class = 0
+        target_class = label_names.index(target_class)
         poison_percent = 0.5
 
         x_poison = np.copy(x_subset)
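The new line converts the class name chosen in the UI into an index into label_names before it is used as the poisoning target. Below is a minimal sketch of how such a target typically feeds into ART's PoisoningAttackBackdoor; the square-patch trigger, the stand-in x_subset data and the one-hot target labels are illustrative assumptions, not the app's actual poison_func or dataset.

import numpy as np
from art.attacks.poisoning import PoisoningAttackBackdoor

label_names = ['dog', 'cassette player', 'chainsaw', 'church', 'french horn',
               'garbage truck', 'gas pump', 'golf ball', 'parachutte']

def poison_func(x):
    # Illustrative trigger: paste a small white square into the top-left corner.
    x = np.copy(x)
    x[..., :8, :8] = 1.0  # assumes float images in [0, 1], channels-first (CHW)
    return x

backdoor = PoisoningAttackBackdoor(poison_func)

target_class = label_names.index('dog')            # Radio string -> class index
x_subset = np.random.rand(10, 3, 224, 224).astype(np.float32)  # stand-in data
y_target = np.zeros((len(x_subset), len(label_names)), dtype=np.float32)
y_target[:, target_class] = 1.0                    # one-hot target labels

# poison() applies the trigger and relabels the samples as the target class.
x_poisoned, y_poisoned = backdoor.poison(x_subset, y=y_target)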
@@ -259,6 +258,10 @@ def clf_poison_evaluate(*args):
         clean_preds = np.argmax(outputs, axis=1)
         clean_acc = np.mean(clean_preds == clean_y)
 
+        clean_out = []
+        for i, im in enumerate(clean_x):
+            clean_out.append( (im.transpose(1,2,0), label_names[clean_preds[i]]) )
+
         poison_x = x_poison[is_poison]
         poison_y = y_poison[is_poison]
 
@@ -269,8 +272,9 @@ def clf_poison_evaluate(*args):
         poison_out = []
         for i, im in enumerate(poison_x):
             poison_out.append( (im.transpose(1,2,0), label_names[poison_preds[i]]) )
+
 
-    return poison_out, clean_acc, poison_acc
+    return clean_out, poison_out, clean_acc, poison_acc
 
 
 def show_params(type):
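The added clean_out loop mirrors the existing poison_out loop: each gallery entry is an (image, caption) pair, with the image transposed from channels-first (CHW) to channels-last (HWC) so Gradio can render it and the caption taken from the predicted label. A short, self-contained sketch of that pattern, with stand-in images and predictions:

import numpy as np

label_names = ['dog', 'cassette player', 'chainsaw']
clean_x = np.random.rand(4, 3, 224, 224).astype(np.float32)  # CHW images
clean_preds = np.array([0, 2, 1, 0])                         # argmax of the logits

clean_out = []
for i, im in enumerate(clean_x):
    # gr.Gallery accepts a list of (image, caption) tuples; images must be HWC.
    clean_out.append((im.transpose(1, 2, 0), label_names[clean_preds[i]]))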
@@ -504,25 +508,36 @@ with gr.Blocks(css=css, theme=gr.themes.Base()) as demo:
     with gr.Row():
         with gr.Column(scale=1):
             attack = gr.Textbox(visible=True, value="Backdoor", label="Attack", interactive=False)
-            target_class = gr.
-
+            target_class = gr.Radio(label="Target class", info="The class you wish to force the model to predict.",
+                                    choices=['dog',
+                                             'cassette player',
+                                             'chainsaw',
+                                             'church',
+                                             'french horn',
+                                             'garbage truck',
+                                             'gas pump',
+                                             'golf ball',
+                                             'parachutte',], value='dog')
             trigger_image = gr.Image(label="Trigger Image", value="./baby-on-board.png")
             eval_btn_patch = gr.Button("Evaluate")
         with gr.Column(scale=2):
-
-        with gr.Column(scale=2):
+            clean_gallery = gr.Gallery(label="Clean", preview=False, show_download_button=True)
             clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
+        with gr.Column(scale=2):
+            poison_gallery = gr.Gallery(label="Poisoned", preview=False, show_download_button=True)
             poison_success = gr.Number(label="Poison Success", precision=2)
 
     eval_btn_patch.click(clf_poison_evaluate, inputs=[attack, model_type, trigger_image, target_class, data_type],
-                         outputs=[poison_gallery, clean_accuracy, poison_success])
+                         outputs=[clean_gallery, poison_gallery, clean_accuracy, poison_success])
 
 if __name__ == "__main__":
 
-    #
+    # For development
     '''demo.launch(show_api=False, debug=True, share=False,
                    server_name="0.0.0.0",
                    server_port=7777,
                    ssl_verify=False,
                    max_threads=20)'''
+
+    # For deployment
     demo.launch(share=True, ssl_verify=False)
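Taken together, the UI changes pair each accuracy number with a gallery of captioned predictions, and the click handler now returns four values in the order listed in outputs. A stripped-down sketch of the same wiring; the evaluate handler and its dummy data are stand-ins for the app's clf_poison_evaluate, not its real logic:

import numpy as np
import gradio as gr

def evaluate(target_class):
    # Dummy handler: (image, caption) pairs for both galleries plus two metrics.
    imgs = (np.random.rand(2, 64, 64, 3) * 255).astype(np.uint8)
    clean = [(im, target_class) for im in imgs]
    poisoned = [(im, target_class) for im in imgs]
    return clean, poisoned, 0.95, 0.50

with gr.Blocks() as demo:
    target_class = gr.Radio(choices=['dog', 'church', 'golf ball'], value='dog',
                            label="Target class")
    eval_btn = gr.Button("Evaluate")
    with gr.Row():
        with gr.Column():
            clean_gallery = gr.Gallery(label="Clean")
            clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
        with gr.Column():
            poison_gallery = gr.Gallery(label="Poisoned")
            poison_success = gr.Number(label="Poison Success", precision=2)
    eval_btn.click(evaluate, inputs=[target_class],
                   outputs=[clean_gallery, poison_gallery, clean_accuracy, poison_success])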
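The launch block now separates a commented-out development configuration (local server on a fixed port) from the deployed call, which requests a public share link and skips SSL verification. A hedged sketch of switching between the two; the DEV flag is illustrative, the app simply keeps one branch commented out:

import gradio as gr

DEV = False  # illustrative toggle, not part of the app

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")

if __name__ == "__main__":
    if DEV:
        # For development: bind locally on a fixed port, no public link.
        demo.launch(show_api=False, debug=True, share=False,
                    server_name="0.0.0.0", server_port=7777,
                    ssl_verify=False, max_threads=20)
    else:
        # For deployment: request a public share link.
        demo.launch(share=True, ssl_verify=False)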