# -*- coding: utf-8 -*-
"""evaluate_gan_gradio.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1ckZU76dq3XWcpa5PpQF8a6qJwkTttg8v

# โš™๏ธ Setup
"""

#!pip install gradio -q
#!pip install wget -q
#!pip install tensorflow_addons -q

"""## Fix random seeds"""

SEED = 11
import os
os.environ['PYTHONHASHSEED'] = str(SEED)
import random
import numpy as np
import tensorflow as tf

random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)

"""## Imports"""

import gradio as gr
import wget
import pandas as pd
import gdown
from zipfile import ZipFile

"""## Download CelebA attributes

We'll use face images from the CelebA dataset, resized to 64x64.
"""

# Download the attribute labels from a public GitHub mirror; they have been
# preprocessed into a 0/1 CSV file.
os.makedirs("/content/celeba_gan", exist_ok=True)
wget.download(url="https://github.com/buoi/conditional-face-GAN/blob/main/list_attr_celeba01.csv.zip?raw=true", out="/content/celeba_gan/list_attr_celeba01.csv.zip")
import shutil
shutil.unpack_archive(filename="/content/celeba_gan/list_attr_celeba01.csv.zip", extract_dir="/content/celeba_gan")
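
# Hedged sanity check (assumes the archive layout above): the unpacked CSV should now exist.
assert os.path.exists("/content/celeba_gan/list_attr_celeba01.csv")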

"""## Dataset preprocessing functions"""

# Image utility functions

def conv_range(in_range=(-1, 1), out_range=(0, 255)):
    """Return a function that linearly maps values from in_range to out_range."""

    # compute means and spans once
    in_mean, out_mean = np.mean(in_range), np.mean(out_range)
    in_span, out_span = np.ptp(in_range), np.ptp(out_range)

    # return function
    def convert_img_range(in_img):
        out_img = (in_img - in_mean) / in_span
        out_img = out_img * out_span + out_mean
        return out_img

    return convert_img_range
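
# Minimal usage sketch (illustration only): the defaults map generator-style
# [-1, 1] values to displayable [0, 255] values.
to_0_255 = conv_range(in_range=(-1, 1), out_range=(0, 255))
assert to_0_255(np.array([-1.0, 0.0, 1.0])).tolist() == [0.0, 127.5, 255.0]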

def crop128(img):
    #return img[:, 77:141, 57:121]  # 64x64 center crop
    return img[:, 45:173, 25:153]  # 128x128 center crop

def resize64(img):
    return tf.image.resize(img, (64,64), antialias=True, method='bilinear')
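
# Sketch of how these helpers compose on a dummy 218x178 CelebA-shaped batch
# (illustration only, not used by the Gradio demo below).
_dummy = tf.zeros((1, 218, 178, 3))
assert resize64(crop128(_dummy)).shape == (1, 64, 64, 3)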

"""# ๐Ÿ“‰ Evaluate model

## Load trained GAN
"""

from collections import namedtuple
ModelEntry = namedtuple('ModelEntry', [
    'entity', 'resume_id', 'model_name', 'exp_name', 'best_epoch',
    'expected_fid', 'expected_f1', 'expected_acc', 'epoch_range',
    'exp_avg_fid', 'exp_avg_f1', 'exp_avg_acc'])

dcgan = ModelEntry('buio','t1qyadzp', 'dcgan','dcgan',
                   'v24',25.64, 0, 0, (16,28), 24.29, 0,0)

acgan2 = ModelEntry('buio','rn8xslip','acgan2_BNstdev','acgan2_BNstdev', 
                    'v22', 29.05, 0.9182, 0.9295, (20,31), 24.79, 0.918, 0.926)

acgan10 = ModelEntry('buio','3ja6uvac','acgan10_nonseparBNstdev_split','acgan10_nonseparBNstdev_split',
                     'v24', 26.89, 0.859, 0.785, (18,30), 24.59, 0.789,0.858)

acgan40 = ModelEntry('buio','2ev65fpt','acgan40_BNstdev','acgan40_BNstdev',
                     'v15', 28.23, 0.430, 0.842, (15,25), 27.72, 0.46, 0.851)


acgan2_hd = ModelEntry('buio','6km6fdgr','acgan2_BNstdev_218x178','acgan2_BNstdev_218x178',
                     'v11', 0,0,0 ,(0,0), 0, 0, 0)

acgan10_hd = ModelEntry('buio','3v366skw','acgan40_BNstdev_218x178','acgan40_BNstdev_218x178',
                     'v14', 0,0,0, (0,0),0, 0, 0)

acgan40_hd = ModelEntry('buio','booicugb','acgan10_nonseparBNstdev_split_299_218x178','acgan10_nonseparBNstdev_split_299_218x178',
                     'v14', 52.9, 0.410, 0.834, (12,15), 0, 0, 0)



#1cr1a5w4 SAGAN_3 v31 buianifolli
#2o3z6bqb SAGAN_5 v17 buianifolli
#zscel8bz SAGAN_6 v29 buianifolli

#wandb artifacts
#sagan40 v18

#keras_metadata_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6Mjg3MzA4NTY=/5f09f68e9bb5b09efbc37ad76cdcdbb0"
#saved_model_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6Mjg3NDY1OTU=/2676cd88ef1866d6e572916e413a933e"
#variables_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6Mjg3NDY1OTU=/5cab1cb7351f0732ea137fb2d2e0d4ec"
#index_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6Mjg3NDY1OTU=/480b55762c3358f868b8cce53984736b"

#sagan10 v16

keras_metadata_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6MjYxMDQwMDE=/392d036bf91d3648eb5a2fa74c1eb716"
saved_model_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6MjYxMzQ0Mjg=/a5f8608efcc5dafbe780babcffbc79a9"
variables_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6MjYxMzQ0Mjg=/a62bf0c4bf7047c0a31df7d2cfdb54f0"
index_url = "https://api.wandb.ai/artifactsV2/gcp-us/buianifolli/QXJ0aWZhY3Q6MjYxMzQ0Mjg=/de6539a7f0909d1dafa89571c7df43d1"

# Download the SavedModel files from W&B, replacing any previous download.
gan_path = "/content/gan_model/"
for fname in ("keras_metadata.pb", "saved_model.pb",
              "variables/variables.data-00000-of-00001", "variables/variables.index"):
    try:
        os.remove(gan_path + fname)
    except FileNotFoundError:
        pass
os.makedirs(gan_path, exist_ok=True)
os.makedirs(gan_path + "variables", exist_ok=True)

wget.download(keras_metadata_url, gan_path + "keras_metadata.pb")
wget.download(saved_model_url, gan_path + "saved_model.pb")
wget.download(variables_url, gan_path + "variables/variables.data-00000-of-00001")
wget.download(index_url, gan_path + "variables/variables.index")

gan = tf.keras.models.load_model(gan_path)

IMAGE_RANGE = '11'
IMAGE_SIZE = gan.discriminator.input_shape[1]
if IMAGE_SIZE == 64:
    IMAGE_SHAPE = (64, 64, 3)
elif IMAGE_SIZE == 218:
    IMAGE_SHAPE = (218, 178, 3)

# A conditional generator takes (latent, attributes) inputs; an unconditional one takes only the latent vector.
try:
    LATENT_DIM = gan.generator.input_shape[0][1]
    N_ATTRIBUTES = gan.generator.input_shape[1][1]
except TypeError:
    LATENT_DIM = gan.generator.input_shape[1]
    N_ATTRIBUTES = 0
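
# Report what was inferred from the loaded checkpoint (values depend on the downloaded model).
print(f"Loaded GAN: image size {IMAGE_SIZE}, latent dim {LATENT_DIM}, {N_ATTRIBUTES} attributes")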

"""## ๐Ÿ’พ Dataset"""

#@title Select Attributes {form-width: "50%", display-mode: "both" }

#NUMBER_OF_ATTRIBUTES = "10" #@param [0, 2, 10, 12, 40]
#N_ATTRIBUTES = int(NUMBER_OF_ATTRIBUTES)

IMAGE_RANGE = '11'

BATCH_SIZE = 64  #@param {type: "number"}
if N_ATTRIBUTES == 2:
    LABELS = ["Male", "Smiling"]

elif N_ATTRIBUTES == 10:
    LABELS = [
          "Mouth_Slightly_Open", "Wearing_Lipstick", "High_Cheekbones", "Male", "Smiling", 
          "Heavy_Makeup", "Wavy_Hair", "Oval_Face", "Pointy_Nose", "Arched_Eyebrows"]

elif N_ATTRIBUTES == 12:
    LABELS = ['Wearing_Lipstick','Mouth_Slightly_Open','Male','Smiling',
              'High_Cheekbones','Heavy_Makeup','Attractive','Young',
              'No_Beard','Black_Hair','Arched_Eyebrows','Big_Nose']
elif N_ATTRIBUTES == 40:
    LABELS = [
            '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive',
            'Bags_Under_Eyes', 'Bald', 'Bangs', 'Big_Lips', 'Big_Nose',
            'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',
            'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair',
            'Heavy_Makeup', 'High_Cheekbones', 'Male', 'Mouth_Slightly_Open',
            'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',
            'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns',
            'Smiling', 'Straight_Hair', 'Wavy_Hair', 'Wearing_Earrings',
            'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace',
            'Wearing_Necktie', 'Young']


else:
    LABELS = ["Male", "Smiling"]  # fallback, only used for dataset creation

# Load the CelebA attribute table and keep the selected labels as 0/1 rows, one per image.
df = pd.read_csv("/content/celeba_gan/list_attr_celeba01.csv")
attr_list = df[LABELS].values.tolist()
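
# Quick sanity peek at the parsed labels (illustration only).
print(f"{len(attr_list)} label rows, {len(LABELS)} attributes per row")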

def gen_img(attributes):
    """Generate one face from the attribute indices selected in the UI."""
    # Build a binary attribute vector: 1 for each selected attribute, 0 otherwise.
    attr = np.zeros((1, N_ATTRIBUTES))
    for a in attributes:
        attr[0, int(a)] = 1

    num_img = 1
    random_latent_vectors = tf.random.normal(shape=(num_img, LATENT_DIM))

    # The generator outputs images in [-1, 1]; rescale to [0, 1] for display.
    generated_images = gan.generator((random_latent_vectors, attr))
    generated_images = (generated_images * 0.5 + 0.5).numpy()
    return generated_images[0]
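
# Standalone usage sketch (assumes a conditional generator, i.e. N_ATTRIBUTES > 0):
# face = gen_img([0])   # enable only the first attribute in LABELS
# print(face.shape)     # HxWx3 array with values in [0, 1]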

iface = gr.Interface(
    gen_img,
    gr.inputs.CheckboxGroup(LABELS[:N_ATTRIBUTES], type='index'),
    "image",
    layout='unaligned'
)
iface.launch()