Update app.py
app.py CHANGED
@@ -74,7 +74,6 @@ def find_best_bucket(h, w, options):
     return best_bucket
 
 @spaces.GPU()
-@torch.inference_mode()
 def encode_cropped_prompt_77tokens(txt: str):
     memory_management.load_models_to_gpu(text_encoder)
     cond_ids = tokenizer(txt,
@@ -86,7 +85,6 @@ def encode_cropped_prompt_77tokens(txt: str):
     return text_cond
 
 @spaces.GPU()
-@torch.inference_mode()
 def pytorch2numpy(imgs):
     results = []
     for x in imgs:
@@ -97,7 +95,6 @@ def pytorch2numpy(imgs):
     return results
 
 @spaces.GPU()
-@torch.inference_mode()
 def numpy2pytorch(imgs):
     h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 127.5 - 1.0
     h = h.movedim(-1, 1)
@@ -110,12 +107,10 @@ def resize_without_crop(image, target_width, target_height):
     return np.array(resized_image)
 
 @spaces.GPU()
-@torch.inference_mode()
 def interrogator_process(x):
     return wd14tagger.default_interrogator(x)
 
 @spaces.GPU()
-@torch.inference_mode()
 def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed, steps, n_prompt, cfg,
             progress=gr.Progress()):
     rng = torch.Generator(device=memory_management.gpu).manual_seed(int(seed))
@@ -155,7 +150,6 @@ def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed,
     return pixels
 
 @spaces.GPU()
-@torch.inference_mode()
 def process_video_inner(image_1, image_2, prompt, seed=123, steps=25, cfg_scale=7.5, fs=3, progress_tqdm=None):
     random.seed(seed)
     np.random.seed(seed)
@@ -211,7 +205,6 @@ def process_video_inner(image_1, image_2, prompt, seed=123, steps=25, cfg_scale=
     return video, image_1, image_2
 
 @spaces.GPU
-@torch.inference_mode()
 def process_video(keyframes, prompt, steps, cfg, fps, seed, progress=gr.Progress()):
     result_frames = []
     cropped_images = []
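Every hunk makes the same change: the `@torch.inference_mode()` decorator is dropped from each function wrapped with `@spaces.GPU()`. The commit message does not say why, but a plausible reading is that on ZeroGPU Spaces, `spaces.GPU` dispatches the wrapped callable to a separate GPU worker process, and a no-grad context baked in as a decorator at import time can clash with that dispatch. The sketch below shows the usual alternative: entering `torch.inference_mode()` as a context manager inside the function body, so the context is created in the process that actually runs on the GPU. The model and function names here are hypothetical stand-ins, not code from this Space.

import torch
import spaces  # Hugging Face ZeroGPU helper

# Hypothetical stand-in model, for illustration only.
model = torch.nn.Linear(4, 4)

@spaces.GPU()  # executes the call in a ZeroGPU worker process
def run_inference(x: torch.Tensor) -> torch.Tensor:
    # Enter inference mode inside the body rather than stacking
    # @torch.inference_mode() under @spaces.GPU(): the no-grad
    # context is then created in the worker process itself.
    with torch.inference_mode():
        return model(x)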