last/first frame output, reordering some options, examples

- .gitattributes +1 -0
- app.py +71 -32
- example_input.png +0 -0

.gitattributes CHANGED
@@ -1,5 +1,6 @@
 *.webp filter=lfs diff=lfs merge=lfs -text
 *.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text

app.py CHANGED
@@ -1,5 +1,6 @@
 
 import os
+import json
 from io import BytesIO
 import base64
 from functools import partial
@@ -16,8 +17,6 @@ from makeavid_sd.inference import (
 print(os.environ.get('XLA_PYTHON_CLIENT_PREALLOCATE', 'NotSet'))
 print(os.environ.get('XLA_PYTHON_CLIENT_ALLOCATOR', 'NotSet'))
 
-_preheat: bool = False
-
 _seen_compilations = set()
 
 _model = InferenceUNetPseudo3D(
@@ -30,9 +29,20 @@ if _model.failed != False:
     trace = f'```{_model.failed}```'
     with gr.Blocks(title = 'Make-A-Video Stable Diffusion JAX', analytics_enabled = False) as demo:
         exception = gr.Markdown(trace)
-
     demo.launch()
 
+_examples = []
+_expath = 'examples'
+for x in os.listdir(_expath):
+    with open(os.path.join(_expath, x, 'params.json'), 'r') as f:
+        ex = json.load(f)
+    ex['image_input'] = None
+    if os.path.isfile(os.path.join(_expath, x, 'input.png')):
+        ex['image_input'] = os.path.join(_expath, x, 'input.png')
+    ex['image_output'] = os.path.join(_expath, x, 'output.gif')
+    _examples.append(ex)
+
+
 _output_formats = (
     'webp', 'gif'
 )
@@ -100,10 +110,17 @@ def generate(
         duration = round(1000 / fps),
         allow_mixed = True
     )
-    data = base64.b64encode(buffer.getvalue()).decode()
+    data = f'data:image/{output_format};base64,' + base64.b64encode(buffer.getvalue()).decode()
+    buffer.close()
+    buffer = BytesIO()
+    images[-1].save(buffer, format ='png')
+    last_data = f'data:image/png;base64,' + base64.b64encode(buffer.getvalue()).decode()
+    buffer.close()
+    buffer = BytesIO()
+    images[0].save(buffer, format ='png')
+    first_data = f'data:image/png;base64,' + base64.b64encode(buffer.getvalue()).decode()
     buffer.close()
-
-    return data
+    return data, last_data, first_data
 
 def check_if_compiled(hint_image, inference_steps, height, width, num_frames, scheduler_type, message):
     height = int(height)
@@ -116,27 +133,6 @@ def check_if_compiled(hint_image, inference_steps, height, width, num_frames, scheduler_type, message):
     else:
         return f"""{message}"""
 
-if _preheat:
-    print('\npreheating the oven')
-    generate(
-        prompt = 'preheating the oven',
-        neg_prompt = '',
-        image = None,
-        inference_steps = 20,
-        cfg = 12.0,
-        seed = 0
-    )
-    print('Entertaining the guests with sailor songs played on an old piano.')
-    dada = generate(
-        prompt = 'Entertaining the guests with sailor songs played on an old harmonium.',
-        neg_prompt = '',
-        image = Image.new('RGB', size = (512, 512), color = (0, 0, 0)),
-        inference_steps = 20,
-        cfg = 12.0,
-        seed = 0
-    )
-    print('dinner is ready\n')
-
 with gr.Blocks(title = 'Make-A-Video Stable Diffusion JAX', analytics_enabled = False) as demo:
     variant = 'panel'
     with gr.Row():
@@ -283,6 +279,52 @@ with gr.Blocks(title = 'Make-A-Video Stable Diffusion JAX', analytics_enabled = False) as demo:
                 value = 'example.gif',
                 interactive = False
             )
+            tips = gr.Markdown('🤫 *Secret tip*: take the last frame as input for the next generation.')
+            with gr.Row():
+                last_frame_output = gr.Image(
+                    label = 'Last frame',
+                    interactive = False
+                )
+                first_frame_output = gr.Image(
+                    label = 'First frame',
+                    interactive = False
+                )
+            examples_lst = []
+            for x in _examples:
+                examples_lst.append([
+                    x['image_output'],
+                    x['prompt'],
+                    x['neg_prompt'],
+                    x['image_input'],
+                    x['cfg'],
+                    x['cfg_image'],
+                    x['seed'],
+                    x['fps'],
+                    x['num_frames'],
+                    x['height'],
+                    x['width'],
+                    x['scheduler'],
+                    x['format']
+                ])
+            examples = gr.Examples(
+                examples = examples_lst,
+                inputs = [
+                    image_output,
+                    prompt_input,
+                    neg_prompt_input,
+                    image_input,
+                    cfg_input,
+                    cfg_image_input,
+                    seed_input,
+                    fps_input,
+                    num_frames_input,
+                    height_input,
+                    width_input,
+                    scheduler_input,
+                    output_format
+                ],
+                postprocess = False
+            )
     #trigger_inputs = [ image_input, inference_steps_input, height_input, width_input, num_frames_input, scheduler_input ]
     #trigger_check_fun = partial(check_if_compiled, message = 'Current parameters need compilation.')
     #height_input.change(fn = trigger_check_fun, inputs = trigger_inputs, outputs = will_trigger)
@@ -308,14 +350,11 @@ with gr.Blocks(title = 'Make-A-Video Stable Diffusion JAX', analytics_enabled = False) as demo:
             scheduler_input,
             output_format
         ],
-        outputs = image_output,
+        outputs = [ image_output, last_frame_output, first_frame_output ],
        postprocess = False
     )
     #cancel_button.click(fn = lambda: None, cancels = ev)
 
-demo.queue(concurrency_count = 1, max_size =
+demo.queue(concurrency_count = 1, max_size = 10)
 demo.launch()
 
-# Photorealistic fantasy oil painting of the angry minotaur in a threatening pose by Randy Vargas.
-# A girl is dancing by a beautiful lake by sophie anderson and greg rutkowski and alphonse mucha.
-# They are dancing in the club but everybody is a 3d cg hairy monster wearing a hairy costume.
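
The example loader added above expects one directory per example under `examples/`, each holding a `params.json` plus an optional `input.png` hint image and an `output.gif`. The keys below are exactly the ones app.py reads when it builds the `gr.Examples` rows; the validator helper itself is only an illustrative sketch, not part of the Space.

```python
import json
import os

# Keys each examples/<name>/params.json must provide for the gr.Examples rows
# (image_input / image_output are filled in by the loader itself).
REQUIRED_KEYS = (
    'prompt', 'neg_prompt', 'cfg', 'cfg_image', 'seed',
    'fps', 'num_frames', 'height', 'width', 'scheduler', 'format',
)

def validate_example_dir(path: str) -> dict:
    # Illustrative helper: load one example's params.json and fail loudly
    # if a key the UI expects is missing.
    with open(os.path.join(path, 'params.json'), 'r') as f:
        params = json.load(f)
    missing = [k for k in REQUIRED_KEYS if k not in params]
    if missing:
        raise KeyError(f'{path}: params.json is missing {missing}')
    return params
```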
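The reworked tail of `generate()` repeats the same buffer-and-encode step three times: once for the webp/gif animation and once each for the last and first frame PNGs. A minimal sketch of that pattern, assuming a Pillow image as input (the helper name `to_data_uri` is made up for illustration):

```python
import base64
from io import BytesIO
from PIL import Image

def to_data_uri(image: Image.Image, fmt: str = 'png') -> str:
    # Serialize the frame into an in-memory buffer and wrap the base64 payload
    # in a data URI, which the frontend can display directly when the click
    # handler is wired with postprocess = False.
    buffer = BytesIO()
    image.save(buffer, format = fmt)
    data = base64.b64encode(buffer.getvalue()).decode()
    buffer.close()
    return f'data:image/{fmt};base64,{data}'

# e.g. to_data_uri(images[0]) for the first frame, to_data_uri(images[-1]) for the last.
```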
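Since `generate()` now returns three data URIs, the click event's `outputs` list has to match that tuple element for element. A stripped-down sketch of the same wiring, assuming Gradio 3.x and placeholder component names (nothing here is copied from the Space beyond the pattern itself):

```python
import gradio as gr

def fake_generate(prompt: str):
    # Stand-in for the real generate(): returns (animation, last frame, first frame)
    # as data URI strings, which map onto the three output components below.
    placeholder = 'data:image/png;base64,'
    return 'data:image/gif;base64,', placeholder, placeholder

with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label = 'Prompt')
    submit_button = gr.Button('Make A Video')
    image_output = gr.Image(label = 'Output', interactive = False)
    last_frame_output = gr.Image(label = 'Last frame', interactive = False)
    first_frame_output = gr.Image(label = 'First frame', interactive = False)
    # postprocess = False passes the data URIs through untouched instead of
    # letting Gradio re-encode them.
    submit_button.click(
        fn = fake_generate,
        inputs = prompt_input,
        outputs = [ image_output, last_frame_output, first_frame_output ],
        postprocess = False
    )

demo.queue(concurrency_count = 1, max_size = 10)
# demo.launch()
```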

example_input.png DELETED
Binary file (196 kB)