Yaron Koresh committed: Update app.py
app.py
CHANGED
@@ -148,18 +148,22 @@ def infer(pm):
 
     p1 = pm["p"]
     name = generate_random_string(12)+".png"
+    neg = pm["n"]
 
+    if neg != "":
+        neg = f' (((({neg}))))'
     _do = ['beautiful', 'playful', 'photographed', 'realistic', 'dynamic poze', 'deep field', 'reasonable coloring', 'rough texture', 'best quality', 'focused']
     if p1 != "":
         _do.append(f'{p1}')
-    posi = " ".join(_do)
+    posi = " ".join(_do)+neg
 
     return Piper(name,posi,pm["m"])
 
-def run(m,p1,*result):
+def run(m,p1,p2,*result):
 
     p1_en = translate(p1,"english")
-
+    p2_en = translate(p2,"english")
+    pm = {"p":p1_en,"n":p2_en,"m":m}
     ln = len(result)
     print("images: "+str(ln))
     rng = list(range(ln))
@@ -189,27 +193,29 @@ def main():
     fps=40
     time=5
     device = "cuda"
-    dtype = torch.
+    dtype = torch.bfloat16
     result=[]
     step = 2
 
     progress=gr.Progress()
     progress((0, step))
 
-    base="SG161222/Realistic_Vision_V6.0_B1_noVAE"
-
-
+    #base="SG161222/Realistic_Vision_V6.0_B1_noVAE"
+    #vae="stabilityai/sd-vae-ft-mse-original"
+    #repo = "ByteDance/SDXL-Lightning"
+    #ckpt = f"sdxl_lightning_{step}step_unet.safetensors"
 
-    unet = UNet2DConditionModel.from_config(base, subfolder="unet").to(device)
-    unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
+    #unet = UNet2DConditionModel.from_config(base, subfolder="unet").to(device)
+    #unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
 
+    base = "emilianJR/epiCRealism"
     repo = "ByteDance/AnimateDiff-Lightning"
     ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
 
     adapter = MotionAdapter().to(device, dtype)
     adapter.load_state_dict(load_file(hf_hub_download(repo ,ckpt), device=device), strict=False)
 
-    pipe = AnimateDiffPipeline.from_pretrained(base,
+    pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype, variant="fp16").to(dtype=dtype)
     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
 
     mp.set_start_method("spawn", force=True)
@@ -222,7 +228,14 @@ def main():
         with gr.Row():
             prompt = gr.Textbox(
                 elem_id="prompt",
-                placeholder="
+                placeholder="INCLUDE",
+                container=False,
+                max_lines=1
+            )
+        with gr.Row():
+            prompt2 = gr.Textbox(
+                elem_id="prompt",
+                placeholder="EXCLUDE",
                 container=False,
                 max_lines=1
             )
@@ -250,8 +263,8 @@ def main():
         result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
 
         gr.on(
-            triggers=[run_button.click, prompt.submit],
-            fn=run,inputs=[motion,prompt,*result],outputs=result
+            triggers=[run_button.click, prompt.submit, prompt2.submit],
+            fn=run,inputs=[motion,prompt,prompt2,*result],outputs=result
         )
     demo.queue().launch()
 
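For reference: the new EXCLUDE box is folded into the positive prompt as `(((( ))))` emphasis syntax rather than passed as a separate negative prompt. A plausible reason is that Lightning-distilled checkpoints run with `guidance_scale=1.0`, at which diffusers disables classifier-free guidance and ignores the `negative_prompt` argument; note, though, that stock diffusers treats the parentheses as literal prompt text rather than attention weights. Below is a minimal, self-contained sketch of the AnimateDiff-Lightning setup this commit lands on, assuming a CUDA device and the 2-step checkpoint; the prompt string is illustrative, not taken from the Space.

```python
import torch
from diffusers import AnimateDiffPipeline, EulerDiscreteScheduler, MotionAdapter
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda"           # the Space assumes a CUDA GPU
dtype = torch.bfloat16
step = 2                  # 2-step Lightning checkpoint, as in the commit

base = "emilianJR/epiCRealism"
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"

# Load the distilled motion weights into a fresh MotionAdapter.
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)

pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
# Lightning checkpoints expect trailing timestep spacing and a linear beta schedule.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

# guidance_scale=1.0 turns off classifier-free guidance, so a negative_prompt
# would be ignored here, which is likely why the commit appends weighted
# exclusion text to the positive prompt instead.
output = pipe(
    prompt="beautiful playful cat, best quality, focused",  # illustrative
    guidance_scale=1.0,
    num_inference_steps=step,
)
export_to_gif(output.frames[0], "animation.gif")
```

If attention-weighted prompts like `(((( ))))` are actually needed with diffusers, a prompt-weighting library such as compel is the usual route rather than raw parentheses in the prompt string.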