add uuid for animation temp folders
- ImageState.py +9 -3
- app.py +4 -1
ImageState.py CHANGED

@@ -41,8 +41,8 @@ class ImageState:
         self.transform_history = []
         self.attn_mask = None
         self.prompt_optim = prompt_optimizer
-        self.state_id =
-        print("NEW INSTANCE")
+        self.state_id = None
+        # print("NEW INSTANCE")
         print(self.state_id)
         self._load_vectors()
         self.init_transforms()
@@ -122,6 +122,9 @@ class ImageState:
     def _render_all_transformations(self, return_twice=True):
         global num
         # global vqgan
+        if self.state_id is None:
+            self.state_id = str(uuid.uuid4())
+            print("redner all", self.state_id)
         current_vector_transforms = (self.blue_eyes, self.lip_size, self.hair_gp, self.asian_transform, sum(self.current_prompt_transforms))
         new_latent = self.blend_latent + sum(current_vector_transforms)
         if self.quant:
@@ -159,7 +162,8 @@
         if path1 is None: path1 = path2
         if path2 is None: path2 = path1
         self.path1, self.path2 = path1, path2
-
+        if self.state_id:
+            clear_img_dir(self.state_id)
         return self.blend(blend_weight)
     @torch.no_grad()
     def blend(self, weight):
@@ -182,6 +186,8 @@
         # rep[mask >= 0.03] = 1
         # return rep
     def apply_prompts(self, positive_prompts, negative_prompts, lr, iterations, lpips_weight, reconstruction_steps):
+        if self.state_id is None:
+            self.state_id = "./" + str(uuid.uuid4())
         transform_log = PromptTransformHistory(iterations + reconstruction_steps)
         transform_log.transforms.append(torch.zeros_like(self.blend_latent, requires_grad=False))
         self.current_prompt_transforms.append(torch.zeros_like(self.blend_latent, requires_grad=False))
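The second hunk above calls clear_img_dir(self.state_id), whose definition is not part of this diff. A minimal sketch of what such a helper could look like, assuming state_id names a per-session temp folder of animation frames:

import os
import shutil

def clear_img_dir(img_dir: str) -> None:
    # Hypothetical stand-in for the helper referenced in the diff above (its
    # real body is not shown in this commit): wipe and recreate the per-state
    # temp folder so each animation render starts from an empty directory.
    if os.path.isdir(img_dir):
        shutil.rmtree(img_dir)
    os.makedirs(img_dir, exist_ok=True)

With UUID-derived folder names, concurrent sessions each get their own temp directory instead of writing animation frames into one shared folder.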
app.py CHANGED

@@ -8,6 +8,9 @@ import torch
 from configs import set_major_global, set_major_local, set_preset, set_small_local
 import uuid
 # print()'
+import torch
+import torchvision.models as models
+from torch.profiler import profile, record_function, ProfilerActivity
 sys.path.append("taming-transformers")
 
 import gradio as gr
@@ -71,7 +74,7 @@ class StateWrapper:
     def update_requant(state, *args, **kwargs):
         return state, *state[0].update_requant(*args, **kwargs)
 with gr.Blocks(css="styles.css") as demo:
-    id = gr.State(str(uuid.uuid4()))
+    # id = gr.State(str(uuid.uuid4()))
     state = gr.State([ImageState(vqgan, promptoptim), str(uuid.uuid4())])
     with gr.Row():
         with gr.Column(scale=1):