Upload 5 files
- app.py +150 -0
- e4e_projection.py +38 -0
- model.py +688 -0
- requirements.txt +10 -0
- util.py +205 -0
app.py
ADDED
@@ -0,0 +1,150 @@
import os
import sys
import math
import random
import urllib.request
from argparse import Namespace
from copy import deepcopy

import numpy as np
import imageio
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torchvision import transforms, utils
from PIL import Image
from tqdm import tqdm
import lpips
import gradio as gr
from huggingface_hub import hf_hub_download

from model import *
from util import *
from e4e.models.psp import pSp

torch.backends.cudnn.benchmark = True

device = 'cpu'

# Load the e4e encoder used to project faces into StyleGAN's W+ space.
model_path_e = hf_hub_download(repo_id="aijackliu/e4e", filename="e4e.pt")
ckpt = torch.load(model_path_e, map_location='cpu')
opts = ckpt['opts']
opts['checkpoint_path'] = model_path_e
opts = Namespace(**opts)
net = pSp(opts, device).eval().to(device)

# Fetch an example image for the demo.
img_url = "http://claireye.com.tw/img/230212a.jpg"
urllib.request.urlretrieve(img_url, "pose.jpg")


# from e4e_projection import projection as e4e_projection
# (the projection helper is inlined below so it reuses the preloaded `net`)
@torch.no_grad()
def projection(img, name, device='cuda'):
    # Encode a PIL image into a W+ latent with the preloaded e4e encoder
    # and cache the result to disk under `name`.
    transform = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(256),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ]
    )
    img = transform(img).unsqueeze(0).to(device)
    images, w_plus = net(img, randomize_noise=False, return_latents=True)
    result_file = {}
    result_file['latent'] = w_plus[0]
    torch.save(result_file, name)
    return w_plus[0]


latent_dim = 512

# Load the pretrained FFHQ StyleGAN2 generator, then clone it once per style.
model_path_s = hf_hub_download(repo_id="aijackliu/stylegan2", filename="stylegan2.pt")
original_generator = Generator(1024, latent_dim, 8, 2).to(device)
ckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage)
original_generator.load_state_dict(ckpt["g_ema"], strict=False)
mean_latent = original_generator.mean_latent(10000)

generatorjojo = deepcopy(original_generator)
generatorcaitlyn = deepcopy(original_generator)  # was mistakenly assigned to `modelcaitlyn`
generatorart = deepcopy(original_generator)
generatorsketch = deepcopy(original_generator)

transform = transforms.Compose(
    [
        transforms.Resize((1024, 1024)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
)

# Load the fine-tuned weights for each style into its generator copy.
modeljojo = hf_hub_download(repo_id="aijackliu/jojo", filename="jojo.pt")
ckptjojo = torch.load(modeljojo, map_location=lambda storage, loc: storage)
generatorjojo.load_state_dict(ckptjojo["g"], strict=False)

modelcaitlyn = hf_hub_download(repo_id="aijackliu/arcane", filename="arcane.pt")
ckptcaitlyn = torch.load(modelcaitlyn, map_location=lambda storage, loc: storage)
generatorcaitlyn.load_state_dict(ckptcaitlyn["g"], strict=False)

modelart = hf_hub_download(repo_id="aijackliu/art", filename="art.pt")
ckptart = torch.load(modelart, map_location=lambda storage, loc: storage)
generatorart.load_state_dict(ckptart["g"], strict=False)

modelSketch = hf_hub_download(repo_id="aijackliu/sketch", filename="sketch.pt")
ckptsketch = torch.load(modelSketch, map_location=lambda storage, loc: storage)
generatorsketch.load_state_dict(ckptsketch["g"], strict=False)


def inference(img, model):
    img.save('out.jpg')
    aligned_face = align_face('out.jpg')

    my_w = projection(aligned_face, "test.pt", device).unsqueeze(0)
    with torch.no_grad():
        if model == 'JoJo':
            my_sample = generatorjojo(my_w, input_is_latent=True)
        elif model == 'Caitlyn':
            my_sample = generatorcaitlyn(my_w, input_is_latent=True)
        elif model == 'Art':
            my_sample = generatorart(my_w, input_is_latent=True)
        else:
            my_sample = generatorsketch(my_w, input_is_latent=True)

    # Generator output is roughly in [-1, 1]; rescale to uint8 before saving.
    npimage = my_sample[0].permute(1, 2, 0).detach().cpu().numpy()
    npimage = np.clip((npimage + 1) * 127.5, 0, 255).astype(np.uint8)
    imageio.imwrite('filename.jpeg', npimage)
    return 'filename.jpeg'


title = "JoJoGAN"
description = "Gradio demo for JoJoGAN: One Shot Face Stylization. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='http://claireye.com.tw'>Claireye</a> | 2023</p>"
examples = [['pose.jpg']]

gr.Interface(
    inference,
    [
        gr.inputs.Image(type="pil"),
        gr.inputs.Dropdown(choices=['JoJo', 'Caitlyn', 'Art', 'Sketch'], type="value", default='JoJo', label="Model"),
    ],
    gr.outputs.Image(type="file"),
    title=title,
    description=description,
    article=article,
    allow_flagging=False,
    examples=examples,
    allow_screenshot=False,
).launch()
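
Note (not part of the commit): a minimal sketch of exercising inference() directly, bypassing the Gradio UI. It assumes app.py's module-level models loaded successfully and uses the example image fetched at startup; the printed path is simply whatever inference() returns.

# Smoke test for inference(), run after importing app.py's globals.
from PIL import Image

img = Image.open("pose.jpg").convert("RGB")   # example image fetched above
out_path = inference(img, "JoJo")             # one of: JoJo, Caitlyn, Art, Sketch
print("stylized image written to", out_path)  # -> filename.jpeg
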
e4e_projection.py
ADDED
@@ -0,0 +1,38 @@
import os
import sys
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
from argparse import Namespace
from e4e.models.psp import pSp
from util import *


@torch.no_grad()
def projection(img, name, device='cuda'):

    model_path = 'e4e.pt'
    ckpt = torch.load(model_path, map_location='cpu')
    opts = ckpt['opts']
    opts['checkpoint_path'] = model_path
    opts = Namespace(**opts)
    net = pSp(opts, device).eval().to(device)

    transform = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(256),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ]
    )

    img = transform(img).unsqueeze(0).to(device)
    images, w_plus = net(img, randomize_noise=False, return_latents=True)
    result_file = {}
    result_file['latent'] = w_plus[0]
    torch.save(result_file, name)
    return w_plus[0]
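
Note (not part of the commit): app.py inlines a copy of this function that fetches the checkpoint via hf_hub_download; the standalone version here expects a local e4e.pt. A minimal sketch of direct use, assuming e4e.pt is present and the input face was aligned with util.align_face:

from util import align_face
from e4e_projection import projection

aligned = align_face('pose.jpg')                          # PIL image, 256x256
w_plus = projection(aligned, 'pose_latent.pt', device='cpu')
print(w_plus.shape)                                       # e.g. torch.Size([18, 512]) for the 1024px model
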
model.py
ADDED
@@ -0,0 +1,688 @@
import math
import random
import functools
import operator

import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function

from op import conv2d_gradfix
if torch.cuda.is_available():
    from op.fused_act import FusedLeakyReLU, fused_leaky_relu
    from op.upfirdn2d import upfirdn2d
else:
    from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu
    from op.upfirdn2d_cpu import upfirdn2d


class PixelNorm(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)

    if k.ndim == 1:
        k = k[None, :] * k[:, None]

    k /= k.sum()

    return k


class Upsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel) * (factor ** 2)
        self.register_buffer("kernel", kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2 + factor - 1
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)

        return out


class Downsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel)
        self.register_buffer("kernel", kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)

        return out


class Blur(nn.Module):
    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()

        kernel = make_kernel(kernel)

        if upsample_factor > 1:
            kernel = kernel * (upsample_factor ** 2)

        self.register_buffer("kernel", kernel)

        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)

        return out


class EqualConv2d(nn.Module):
    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()

        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)

        self.stride = stride
        self.padding = padding

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))

        else:
            self.bias = None

    def forward(self, input):
        out = conv2d_gradfix.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

        return out

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
            f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})"
        )


class EqualLinear(nn.Module):
    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()

        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))

        else:
            self.bias = None

        self.activation = activation

        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            out = F.linear(
                input, self.weight * self.scale, bias=self.bias * self.lr_mul
            )

        return out

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
        )


class ModulatedConv2d(nn.Module):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        fused=True,
    ):
        super().__init__()

        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample

        if upsample:
            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1

            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            self.blur = Blur(blur_kernel, pad=(pad0, pad1))

        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2

        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )

        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)

        self.demodulate = demodulate
        self.fused = fused

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
            f"upsample={self.upsample}, downsample={self.downsample})"
        )

    def forward(self, input, style):
        batch, in_channel, height, width = input.shape

        if not self.fused:
            weight = self.scale * self.weight.squeeze(0)
            style = self.modulation(style)

            if self.demodulate:
                w = weight.unsqueeze(0) * style.view(batch, 1, in_channel, 1, 1)
                dcoefs = (w.square().sum((2, 3, 4)) + 1e-8).rsqrt()

            input = input * style.reshape(batch, in_channel, 1, 1)

            if self.upsample:
                weight = weight.transpose(0, 1)
                out = conv2d_gradfix.conv_transpose2d(
                    input, weight, padding=0, stride=2
                )
                out = self.blur(out)

            elif self.downsample:
                input = self.blur(input)
                out = conv2d_gradfix.conv2d(input, weight, padding=0, stride=2)

            else:
                out = conv2d_gradfix.conv2d(input, weight, padding=self.padding)

            if self.demodulate:
                out = out * dcoefs.view(batch, -1, 1, 1)

            return out

        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style

        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)

        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )

        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = conv2d_gradfix.conv_transpose2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)

        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        else:
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=self.padding, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        return out


class NoiseInjection(nn.Module):
    def __init__(self):
        super().__init__()

        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()

        return image + self.weight * noise


class ConstantInput(nn.Module):
    def __init__(self, channel, size=4):
        super().__init__()

        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        batch = input.shape[0]
        out = self.input.repeat(batch, 1, 1, 1)

        return out


class StyledConv(nn.Module):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        self.activate = FusedLeakyReLU(out_channel)

    def forward(self, input, style, noise=None):
        out = self.conv(input, style)
        out = self.noise(out, noise=noise)
        # out = out + self.bias
        out = self.activate(out)

        return out


class ToRGB(nn.Module):
    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        if upsample:
            self.upsample = Upsample(blur_kernel)

        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None):
        out = self.conv(input, style)
        out = out + self.bias

        if skip is not None:
            skip = self.upsample(skip)

            out = out + skip

        return out


class Generator(nn.Module):
    def __init__(
        self,
        size,
        style_dim,
        n_mlp,
        channel_multiplier=2,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()

        self.size = size

        self.style_dim = style_dim

        layers = [PixelNorm()]

        for i in range(n_mlp):
            layers.append(
                EqualLinear(
                    style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
                )
            )

        self.style = nn.Sequential(*layers)

        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        self.input = ConstantInput(self.channels[4])
        self.conv1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)

        self.log_size = int(math.log(size, 2))
        self.num_layers = (self.log_size - 2) * 2 + 1

        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        self.noises = nn.Module()

        in_channel = self.channels[4]

        for layer_idx in range(self.num_layers):
            res = (layer_idx + 5) // 2
            shape = [1, 1, 2 ** res, 2 ** res]
            self.noises.register_buffer(f"noise_{layer_idx}", torch.randn(*shape))

        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2 ** i]

            self.convs.append(
                StyledConv(
                    in_channel,
                    out_channel,
                    3,
                    style_dim,
                    upsample=True,
                    blur_kernel=blur_kernel,
                )
            )

            self.convs.append(
                StyledConv(
                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
                )
            )

            self.to_rgbs.append(ToRGB(out_channel, style_dim))

            in_channel = out_channel

        self.n_latent = self.log_size * 2 - 2

    def make_noise(self):
        device = self.input.input.device

        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]

        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))

        return noises

    @torch.no_grad()
    def mean_latent(self, n_latent):
        latent_in = torch.randn(
            n_latent, self.style_dim, device=self.input.input.device
        )
        latent = self.style(latent_in).mean(0, keepdim=True)

        return latent

    @torch.no_grad()
    def get_latent(self, input):
        return self.style(input)

    def forward(
        self,
        styles,
        return_latents=False,
        inject_index=None,
        truncation=1,
        truncation_latent=None,
        input_is_latent=False,
        noise=None,
        randomize_noise=True,
    ):

        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers
            else:
                noise = [
                    getattr(self.noises, f"noise_{i}") for i in range(self.num_layers)
                ]

        if not input_is_latent:
            styles = [self.style(s) for s in styles]

            if truncation < 1:
                style_t = []

                for style in styles:
                    style_t.append(
                        truncation_latent + truncation * (style - truncation_latent)
                    )

                styles = style_t
            latent = styles[0].unsqueeze(1).repeat(1, self.n_latent, 1)
        else:
            latent = styles

        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])

        skip = self.to_rgb1(out, latent[:, 1])

        i = 1
        for conv1, conv2, noise1, noise2, to_rgb in zip(
            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
        ):
            out = conv1(out, latent[:, i], noise=noise1)
            out = conv2(out, latent[:, i + 1], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2], skip)

            i += 2

        image = skip

        return image


class ConvLayer(nn.Sequential):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2

        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )

        if activate:
            layers.append(FusedLeakyReLU(out_channel, bias=bias))

        super().__init__(*layers)


class ResBlock(nn.Module):
    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)

        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
        )

    def forward(self, input):
        out = self.conv1(input)
        out = self.conv2(out)

        skip = self.skip(input)
        out = (out + skip) / math.sqrt(2)

        return out


class Discriminator(nn.Module):
    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        convs = [ConvLayer(3, channels[size], 1)]

        log_size = int(math.log(size, 2))

        in_channel = channels[size]

        for i in range(log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]

            convs.append(ResBlock(in_channel, out_channel, blur_kernel))

            in_channel = out_channel

        self.convs = nn.Sequential(*convs)

        self.stddev_group = 4
        self.stddev_feat = 1

        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
        self.final_linear = nn.Sequential(
            EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"),
            EqualLinear(channels[4], 1),
        )

    def forward(self, input):
        out = self.convs(input)

        batch, channel, height, width = out.shape
        group = min(batch, self.stddev_group)
        stddev = out.view(
            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
        )
        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
        stddev = stddev.repeat(group, 1, height, width)
        out = torch.cat([out, stddev], 1)

        out = self.final_conv(out)

        out = out.view(batch, -1)
        out = self.final_linear(out)

        return out
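
Note (not part of the commit): a minimal sketch of sampling from this Generator on CPU, assuming the op CPU fallbacks import cleanly and a converted checkpoint with a "g_ema" entry is available (the path "stylegan2.pt" here is illustrative):

import torch
from model import Generator

g = Generator(1024, 512, 8, channel_multiplier=2)      # 1024x1024 FFHQ config, matching app.py
ckpt = torch.load("stylegan2.pt", map_location="cpu")  # illustrative checkpoint path
g.load_state_dict(ckpt["g_ema"], strict=False)
g.eval()

with torch.no_grad():
    z = torch.randn(1, 512)                            # one latent in Z space
    img = g([z], truncation=0.7, truncation_latent=g.mean_latent(1000))
print(img.shape)                                       # torch.Size([1, 3, 1024, 1024])
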
requirements.txt
ADDED
@@ -0,0 +1,10 @@
tqdm
gdown
scikit-learn==0.22
scipy
lpips
opencv-python-headless
torch
torchvision
imageio
dlib
util.py
ADDED
@@ -0,0 +1,205 @@
from matplotlib import pyplot as plt
import torch
import torch.nn.functional as F
import os
import cv2
import dlib
from PIL import Image
import numpy as np
import math
import torchvision
import scipy
import scipy.ndimage
import torchvision.transforms as transforms

from huggingface_hub import hf_hub_download


shape_predictor_path = hf_hub_download(repo_id="aijackliu/jojogan", filename="face_landmarks.dat")


google_drive_paths = {
}


@torch.no_grad()
def load_model(generator, model_file_path):
    ensure_checkpoint_exists(model_file_path)
    ckpt = torch.load(model_file_path, map_location=lambda storage, loc: storage)
    generator.load_state_dict(ckpt["g_ema"], strict=False)
    return generator.mean_latent(50000)


def ensure_checkpoint_exists(model_weights_filename):
    if not os.path.isfile(model_weights_filename) and (
        model_weights_filename in google_drive_paths
    ):
        gdrive_url = google_drive_paths[model_weights_filename]
        try:
            from gdown import download as drive_download

            drive_download(gdrive_url, model_weights_filename, quiet=False)
        except ModuleNotFoundError:
            print(
                "gdown module not found.",
                "pip3 install gdown or, manually download the checkpoint file:",
                gdrive_url
            )

    if not os.path.isfile(model_weights_filename) and (
        model_weights_filename not in google_drive_paths
    ):
        print(
            model_weights_filename,
            " not found, you may need to manually download the model weights."
        )


# given a list of filenames, load the inverted style code
@torch.no_grad()
def load_source(files, generator, device='cuda'):
    # NOTE: list2style/style2list (and a get_latent accepting truncation/is_latent)
    # are not defined in this file; this helper assumes they are provided elsewhere.
    sources = []

    for file in files:
        source = torch.load(f'./inversion_codes/{file}.pt')['latent'].to(device)

        if source.size(0) != 1:
            source = source.unsqueeze(0)

        if source.ndim == 3:
            source = generator.get_latent(source, truncation=1, is_latent=True)
            source = list2style(source)

        sources.append(source)

    sources = torch.cat(sources, 0)
    if type(sources) is not list:
        sources = style2list(sources)

    return sources


def display_image(image, size=None, mode='nearest', unnorm=False, title=''):
    # image is [3,h,w] or [1,3,h,w] tensor in [0,1]
    if not isinstance(image, torch.Tensor):
        image = transforms.ToTensor()(image).unsqueeze(0)
    if image.is_cuda:
        image = image.cpu()
    if size is not None and image.size(-1) != size:
        image = F.interpolate(image, size=(size, size), mode=mode)
    if image.dim() == 4:
        image = image[0]
    image = image.permute(1, 2, 0).detach().numpy()
    plt.figure()
    plt.title(title)
    plt.axis('off')
    plt.imshow(image)


def get_landmark(filepath, predictor):
    """Get facial landmarks with dlib.
    :return: np.array shape=(68, 2)
    """
    detector = dlib.get_frontal_face_detector()

    img = dlib.load_rgb_image(filepath)
    dets = detector(img, 1)
    assert len(dets) > 0, "Face not detected, try another face image"

    # If several faces are detected, the landmarks of the last one are kept.
    for k, d in enumerate(dets):
        shape = predictor(img, d)

    t = list(shape.parts())
    a = []
    for tt in t:
        a.append([tt.x, tt.y])
    lm = np.array(a)
    return lm


def align_face(filepath, output_size=256, transform_size=1024, enable_padding=True):
    """
    :param filepath: str
    :return: PIL Image
    """
    predictor = dlib.shape_predictor(shape_predictor_path)
    lm = get_landmark(filepath, predictor)

    lm_chin = lm[0: 17]  # left-right
    lm_eyebrow_left = lm[17: 22]  # left-right
    lm_eyebrow_right = lm[22: 27]  # left-right
    lm_nose = lm[27: 31]  # top-down
    lm_nostrils = lm[31: 36]  # top-down
    lm_eye_left = lm[36: 42]  # left-clockwise
    lm_eye_right = lm[42: 48]  # left-clockwise
    lm_mouth_outer = lm[48: 60]  # left-clockwise
    lm_mouth_inner = lm[60: 68]  # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # Read image.
    img = Image.open(filepath)

    # NOTE: these two lines override the function arguments above.
    transform_size = output_size
    enable_padding = True

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
            int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
            min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
           int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
           max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), Image.ANTIALIAS)

    # Return aligned image.
    return img


def strip_path_extension(path):
    return os.path.splitext(path)[0]
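
Note (not part of the commit): a minimal sketch of the alignment helpers above, assuming the dlib landmark checkpoint downloads successfully and that pose.jpg (fetched in app.py) contains a detectable face:

from util import align_face, display_image

aligned = align_face('pose.jpg')         # PIL image, 256x256 by default
display_image(aligned, title='aligned')  # display_image converts PIL input to a tensor itself
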