import spaces
import torch
import gradio as gr
from huggingface_hub import snapshot_download

from modeling.dmm_pipeline import StableDiffusionDMMPipeline
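
# Fetch the DMM checkpoint from the Hugging Face Hub into ./ckpt.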
ckpt_path = "ckpt"
snapshot_download(repo_id="MCG-NJU/DMM", local_dir=ckpt_path)
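
# Load the DMM pipeline from the downloaded checkpoint (fp16, safetensors weights).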
pipe = StableDiffusionDMMPipeline.from_pretrained(
    ckpt_path,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.to("cuda")
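
# Inference entry point: returns one image for the selected model index,
# or one image per model index when "All" is checked.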
@spaces.GPU  # ZeroGPU: request a GPU for the duration of this call
def generate(prompt: str,
             negative_prompt: str,
             model_id: int,
             seed: int = 1234,
             all_models: bool = True):
    if all_models:
        # Generate with every model in the DMM UNet, reusing the same seed.
        outputs = []
        for i in range(pipe.unet.get_num_models()):
            output = pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                width=512,
                height=512,
                num_inference_steps=25,
                guidance_scale=7,
                model_id=i,
                generator=torch.Generator().manual_seed(seed),
            ).images[0]
            outputs.append(output)
        return outputs
    else:
        # Generate a single image with the selected model index.
        output = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=512,
            height=512,
            num_inference_steps=25,
            guidance_scale=7,
            model_id=int(model_id),
            generator=torch.Generator().manual_seed(seed),
        ).images[0]
        return [output]
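
# Gradio UI: prompt/seed inputs, a model-index slider, an "All" toggle,
# and a gallery for the generated images.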
def main():
    with gr.Blocks() as demo:
        gr.Markdown("# DMM")
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox("portrait photo of a girl, long golden hair, flowers, best quality", label="Prompt")
                negative_prompt = gr.Textbox("worst quality,low quality,normal quality,lowres,watermark,nsfw", label="Negative Prompt")
                seed = gr.Number(1234, label="Seed", precision=0)
            with gr.Column():
                model_id = gr.Slider(label="Model Index", minimum=0, maximum=7, step=1)
                all_check = gr.Checkbox(label="All")
                btn = gr.Button("Submit", variant="primary")
        output = gr.Gallery(label="images")
        btn.click(generate,
                  inputs=[prompt, negative_prompt, model_id, seed, all_check],
                  outputs=[output])
    demo.launch()
if __name__ == "__main__":
    main()
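
# Local usage sketch (assumes the repository provides the `modeling` package
# alongside this script and that a CUDA GPU is available):
#   python app.py
# then open the local URL that Gradio prints.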