import gradio as gr
import torch
import torchvision
from PIL import Image
import numpy as np
import spaces

# Labels shown on the editing sliders, grouped as in the control-panel tabs
labels_k = [
	# Head
	'yaw1',
	'yaw2',
	'pitch',
	'roll1',
	'roll2',
	'neck',

	# Mouth
	'pout',
	'open->close',
	'"O" Mouth',
	'smile',

	# Eyes
	'close->open',
	'eyebrows',
	'eyeballs1',
	'eyeballs2',
]

# Indices of the generator's latent dimensions that the sliders control;
# passed to gen.enc_img together with the slider values, in labels_k order
labels_v = [
	37, 39, 28, 15, 33, 31,	# Head
	6, 25, 16, 19,			# Mouth
	13, 24, 17, 26,			# Eyes
]


@torch.compiler.allow_in_graph
def load_image(img, size):
	img = Image.open(img).convert('RGB')
	w, h = img.size	# original size, kept so the output can be resized back
	img = img.resize((size, size))
	img = np.asarray(img)
	img = np.copy(img)	# writable copy
	img = np.transpose(img, (2, 0, 1))	# HWC -> CHW: 3 x size x size

	return img / 255.0, w, h


@torch.compiler.allow_in_graph
def img_preprocessing(img_path, size):
	img, w, h = load_image(img_path, size)	# [0, 1]
	img = torch.from_numpy(img).unsqueeze(0).float()  # [0, 1]
	imgs_norm = (img - 0.5) * 2.0  # [-1, 1]

	return imgs_norm, w, h


# Cache torchvision resize transforms so each size is created once and reused
resize_transform_cache = {}

def get_resize_transform(size):
	"""Get cached resize transform - creates once, reuses many times"""
	if size not in resize_transform_cache:
		# Only create the transform if it doesn't exist in cache
		resize_transform_cache[size] = torchvision.transforms.Resize(
			size,
			interpolation=torchvision.transforms.InterpolationMode.BILINEAR,
			antialias=True
		)
	return resize_transform_cache[size]


def resize(img, size):
	"""Use cached resize transform"""
	transform = get_resize_transform((size, size))
	return transform(img)


def resize_back(img, w, h):
	"""Use cached resize transform for back operation"""
	transform = get_resize_transform((h, w))
	return transform(img)


def img_denorm(img):
	# clamp to [-1, 1], then min-max normalize to [0, 1] for display
	img = img.clamp(-1, 1)
	img = (img - img.min()) / (img.max() - img.min())

	return img


def img_postprocessing(img, w, h):

	img = resize_back(img, w, h)
	img = img_denorm(img)
	img = img.squeeze(0).permute(1, 2, 0).contiguous()	# contiguous() for fast transfer
	img_output = (img.cpu().numpy() * 255).astype(np.uint8)

	return img_output	


def img_edit(gen, device):

	@torch.compile
	def compiled_enc_img(image_tensor, selected_s):
		"""Compiled version of just the model inference"""
		return gen.enc_img(image_tensor, labels_v, selected_s)

	@torch.compile
	def compiled_dec_img(z_s2r, alpha_r2s, feat_rgb):
		"""Compiled version of just the model inference"""
		return gen.dec_img(z_s2r, alpha_r2s, feat_rgb)


	# Pre-warm the compiled model with dummy data to reduce first-run compilation time
	def _warmup_model():
		"""Pre-warm the model compilation with representative shapes"""
		print("[img_edit] Pre-warming model compilation...")
		dummy_image = torch.randn(1, 3, 512, 512, device=device)
		dummy_selected_s = [0.0] * len(labels_v)

		try:
			with torch.inference_mode():
				z_s2r, alpha_r2s, feat_rgb = compiled_enc_img(dummy_image, dummy_selected_s)
				_ = compiled_dec_img(z_s2r, alpha_r2s, feat_rgb)
			print("[img_edit] Model pre-warming completed successfully")
		except Exception as e:
			print(f"[img_edit] Model pre-warming failed (will compile on first use): {e}")

	# Pre-warm the model
	_warmup_model()

	@spaces.GPU
	@torch.inference_mode()
	def edit_img(image, *selected_s):
		# `image` is a filepath from gr.Image(type="filepath");
		# `selected_s` holds the current slider values, in labels_k order
		image_tensor, w, h = img_preprocessing(image, 512)
		image_tensor = image_tensor.to(device)

		z_s2r, alpha_r2s, feat_rgb = compiled_enc_img(image_tensor, selected_s)
		edited_image_tensor = compiled_dec_img(z_s2r, alpha_r2s, feat_rgb)

		# de-norm
		edited_image = img_postprocessing(edited_image_tensor, w, h)

		return edited_image

	def clear_media():
		return None, *([0] * len(labels_k))

	with gr.Tab("Image Editing"):

		inputs_s = []

		with gr.Row():
			with gr.Column(scale=1):
				with gr.Row():
					with gr.Accordion(open=True, label="Image"):
						image_input = gr.Image(type="filepath", width=512)	# , height=550)
						gr.Examples(
							examples=[
								["./data/source/macron.png"],
								["./data/source/einstein.png"],
								["./data/source/taylor.png"],
								["./data/source/portrait1.png"],
								["./data/source/portrait2.png"],
								["./data/source/portrait3.png"],
							],
							inputs=[image_input],
							cache_examples=False,
							visible=True,
							)


				with gr.Row():
					with gr.Column(scale=1):
						with gr.Row():	# Buttons now within a single Row
							#edit_btn = gr.Button("Edit")
							clear_btn = gr.Button("Clear")
						#with gr.Row():
						#	animate_btn = gr.Button("Generate")



			with gr.Column(scale=1):

				with gr.Row():
					with gr.Accordion(open=True, label="Edited Image"):
						image_output = gr.Image(label="Output Image", type='numpy', interactive=False, width=512)


				with gr.Accordion("Control Panel - Using Sliders to Edit Image", open=True):
					with gr.Tab("Head"):
						with gr.Row():
							for k in labels_k[:3]:
								slider = gr.Slider(minimum=-1.0, maximum=0.5, value=0, label=k)
								inputs_s.append(slider)
						with gr.Row():
							for k in labels_k[3:6]:
								slider = gr.Slider(minimum=-0.5, maximum=0.5, value=0, label=k)
								inputs_s.append(slider)

					with gr.Tab("Mouth"):
						with gr.Row():
							for k in labels_k[6:8]:
								slider = gr.Slider(minimum=-0.4, maximum=0.4, value=0, label=k)
								inputs_s.append(slider)
						with gr.Row():
							for k in labels_k[8:10]:
								slider = gr.Slider(minimum=-0.4, maximum=0.4, value=0, label=k)
								inputs_s.append(slider)

					with gr.Tab("Eyes"):
						with gr.Row():
							for k in labels_k[10:12]:
								slider = gr.Slider(minimum=-0.4, maximum=0.4, value=0, label=k)
								inputs_s.append(slider)
						with gr.Row():
							for k in labels_k[12:14]:
								slider = gr.Slider(minimum=-0.2, maximum=0.2, value=0, label=k)
								inputs_s.append(slider)

		for slider in inputs_s:
			slider.change(
				fn=edit_img,
				inputs=[image_input] + inputs_s,
				outputs=[image_output],
				show_progress='hidden',
				trigger_mode='always_last',
				# currently we have a latency around 450ms
				stream_every=0.5,
			)

		clear_btn.click(
			fn=clear_media,
			outputs=[image_output] + inputs_s
		)
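

# Usage sketch (illustrative only, not part of the original module): img_edit()
# builds an "Image Editing" tab, so it must be called inside a gr.Blocks()
# context. The generator class name below is a hypothetical placeholder for
# however `gen` is actually constructed elsewhere in the project.
#
#	device = 'cuda' if torch.cuda.is_available() else 'cpu'
#	gen = YourGenerator().eval().to(device)	# hypothetical model class
#	with gr.Blocks() as demo:
#		img_edit(gen, device)
#	demo.launch()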