linyq committed
Commit 3552447
1 Parent(s): 5f886b3

Update app.py


update

init gradio space

app.py ADDED
@@ -0,0 +1,77 @@
+ import gradio as gr
+ import open_clip
+ import numpy as np
+ import torch
+ import pandas as pd
+ import os
+
+ # Load the stock OpenCLIP ViT-B-32 weights and the debiased checkpoint.
+ open_clip_model, _, preprocess = open_clip.create_model_and_transforms(
+     'ViT-B-32',
+     pretrained='./open_clip_pytorch_model.bin')
+ debiased_model, _, _ = open_clip.create_model_and_transforms(
+     'ViT-B-32',
+     pretrained='./debiased_openclip.pt')
+ open_clip_model.eval()
+ debiased_model.eval()
+ tokenizer = open_clip.get_tokenizer('ViT-B-32')
+
+ def get_clip_scores(images, candidates, w=1):
+     # L2-normalize both embedding matrices, then return the clipped
+     # cosine similarity w * max(0, <image, text>).
+     images = images / np.sqrt(np.sum(images**2, axis=1, keepdims=True))
+     candidates = candidates / np.sqrt(np.sum(candidates**2, axis=1, keepdims=True))
+     per = w * np.clip(np.sum(images * candidates, axis=1), 0, None)
+     return per
+
+ def predict(text1, text2, input_img):
+     with torch.no_grad():
+         image = preprocess(input_img)
+         image = image.unsqueeze(0)
+         image_features = open_clip_model.encode_image(image)
+         debiased_image_features = debiased_model.encode_image(image)
+         texts = tokenizer([text1])
+         texts2 = tokenizer([text2])
+         # Score the image against the first prompt with both models.
+         text_features = open_clip_model.encode_text(texts)
+         debiased_text_features = debiased_model.encode_text(texts)
+         score = get_clip_scores(image_features.numpy(), text_features.numpy())
+         debiased_score = get_clip_scores(debiased_image_features.numpy(), debiased_text_features.numpy())
+         # Score the image against the second prompt with both models.
+         text_features2 = open_clip_model.encode_text(texts2)
+         debiased_text_features2 = debiased_model.encode_text(texts2)
+         score2 = get_clip_scores(image_features.numpy(), text_features2.numpy())
+         debiased_score2 = get_clip_scores(debiased_image_features.numpy(), debiased_text_features2.numpy())
+         data = {'label': ["OpenCLIP for text1", "Debiased CLIP for text1",
+                           "OpenCLIP for text2", "Debiased CLIP for text2"],
+                 'score': [score[0], debiased_score[0], score2[0], debiased_score2[0]]}
+         return pd.DataFrame.from_dict(data)
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Parrot Bias in CLIP!! (Both CLIP models are ViT-B-32)")
+     with gr.Row():
+         im = gr.Image(label="Select Image",
+                       sources=['upload', 'webcam'],
+                       type="pil",
+                       height=450)
+     with gr.Row():
+         txt_1 = gr.Textbox(label="Input Text")
+         txt_2 = gr.Textbox(label="Input Text 2")
+     bar = gr.BarPlot(x="label", y="score",
+                      title="CLIP Score and Debiased Score",
+                      vertical=False, x_title=None)
+     btn = gr.Button(value="Submit")
+     btn.click(predict, inputs=[txt_1, txt_2, im], outputs=[bar])
+
+     gr.Markdown("## Examples (from https://joaanna.github.io/disentangling_spelling_in_clip/)")
+     gr.Examples(
+         [["A mug cup", "An iPad", os.path.join(os.path.dirname(__file__), "examples/IMG_2938.jpg")],
+          ["A hat", "bad", os.path.join(os.path.dirname(__file__), "examples/IMG_3066.jpg")]],
+         [txt_1, txt_2, im],
+         fn=predict,
+         outputs=bar,
+         cache_examples=True,
+     )
+
+ if __name__ == "__main__":
+     demo.launch(show_api=False, share=True)
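
In `predict`, each bar of the plot is the value returned by `get_clip_scores`: a clipped cosine similarity max(0, cos(image, text)) between the image embedding and one prompt's text embedding, computed once with the stock OpenCLIP model and once with the debiased one. As a minimal sketch of exercising this outside the Gradio UI (assuming app.py's module-level names are in scope and one of the bundled example images is on disk):

from PIL import Image  # Pillow; gr.Image with type="pil" hands predict a PIL image

# Hypothetical local check: score one example image against both prompts
# and print the four-row DataFrame that the app renders as a bar plot.
img = Image.open("examples/IMG_2938.jpg")
df = predict("A mug cup", "An iPad", img)
print(df)  # columns 'label' and 'score', one row per (model, prompt) pair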
debiased_openclip.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:275df1f6c23201b78f9cce5a4e319182a403364772a0ce6c9be5895a04070186
+ size 1815703758
open_clip_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bd3c7172de5b207ceac554f5ab5266166f3b9baccc9af5989bc801016d080ad
+ size 605219813
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ open_clip_torch
+ gradio
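
With the two LFS checkpoint files above placed next to app.py, the Space should run locally with `pip install -r requirements.txt` followed by `python app.py`; `torch`, `numpy`, `pandas`, and `Pillow` are expected to arrive as transitive dependencies of `open_clip_torch` and `gradio`.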