from diffusers import (
    CycleDiffusionPipeline,
    DDIMScheduler,
    StableDiffusionImg2ImgPipeline,
)
import gradio as gr
import torch
from PIL import Image

import utils

is_colab = utils.is_google_colab()

scheduler = DDIMScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
    clip_sample=False,
    set_alpha_to_one=False,
)

model_id_or_path = "CompVis/stable-diffusion-v1-4"
pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler)

# Move to GPU only when one is available (the original moved to CUDA
# unconditionally and then again inside this check).
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"


def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", inpaint_image=None):
    global current_model
    for model in models:
        if model.name == model_name:
            current_model = model
            model_path = current_model.path

    # Use a fixed generator only when the user sets a non-zero seed;
    # pick the device so this also works without CUDA.
    generator_device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(generator_device).manual_seed(seed) if seed != 0 else None

    if img is not None:
        return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
    else:
        # txt_to_img is expected to be defined alongside this file (not shown here).
        return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator, inpaint_image)


def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator=None):
    global last_mode
    global pipe
    global current_model_path

    # Reload the pipeline only when the model or the mode has changed.
    if model_path != current_model_path or last_mode != "img2img":
        current_model_path = model_path

        if is_colab or current_model == custom_model:
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
        else:
            pipe.to("cpu")
            pipe = current_model.pipe_i2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        last_mode = "img2img"

    prompt = current_model.prefix + prompt
    # Downscale the input so it fits inside the requested size, keeping the aspect ratio.
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        # num_images_per_prompt=n_images,
        init_image=img,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator,
    )

    return replace_nsfw_images(result)


def replace_nsfw_images(results):
    # Swap out any image the safety checker flagged for a placeholder.
    for i in range(len(results.images)):
        if results.nsfw_content_detected[i]:
            results.images[i] = Image.open("nsfw.png")
    return results.images[0]


css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}.finetuned-diffusion-div p a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
"""

with gr.Blocks(css=css) as demo:
    gr.HTML(
        f"""

            <div class="finetuned-diffusion-div">
              <div>
                <h1>CycleDiffusion with Stable Diffusion</h1>
              </div>
              <p>
                Demo for CycleDiffusion with Stable Diffusion, built with Diffusers 🧨 by HuggingFace 🤗.
              </p>
              <p>You can skip the queue in the colab: Open In Colab</p>
              <p>Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}</p>
            </div>

""" ) with gr.Row(): with gr.Column(scale=55): with gr.Group(): with gr.Row(): prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False) generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) image_out = gr.Image(height=512) # gallery = gr.Gallery( # label="Generated images", show_label=False, elem_id="gallery" # ).style(grid=[1], height="auto") with gr.Column(scale=45): with gr.Tab("Options"): with gr.Group(): neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1) with gr.Row(): guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) with gr.Row(): width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) with gr.Tab("Image to image"): with gr.Group(): image = gr.Image(label="Image", height=256, tool="editor", type="pil") strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) with gr.Tab("Inpainting"): inpaint_image = gr.Image(source='upload', tool='sketch', type="pil", label="Upload").style(height=256) model_name.change(lambda x: gr.update(visible = x == models[0].name), inputs=model_name, outputs=custom_model_group) if is_colab: custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None) # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery) inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, inpaint_image] prompt.submit(inference, inputs=inputs, outputs=image_out) generate.click(inference, inputs=inputs, outputs=image_out) ex = gr.Examples([ [models[1].name, "jason bateman disassembling the demon core", 7.5, 50], [models[4].name, "portrait of dwayne johnson", 7.0, 75], [models[5].name, "portrait of a beautiful alyx vance half life", 10, 50], [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45], [models[5].name, "fantasy portrait painting, digital art", 4.0, 30], ], [model_name, prompt, guidance, steps, seed], image_out, inference, cache_examples=False) gr.Markdown(''' Models by [@nitrosocke](https://huggingface.co/nitrosocke), [@haruu1367](https://twitter.com/haruu1367), [@Helixngc7293](https://twitter.com/DGSpitzer) and others. ❤️
    Space by: [![Twitter Follow](https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social)](https://twitter.com/hahahahohohe)

    ![visitors](https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion)
    ''')

if not is_colab:
    demo.queue(concurrency_count=1)

demo.launch(debug=is_colab, share=is_colab)
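
# A minimal sketch of how CycleDiffusionPipeline itself is typically invoked,
# for contrast with the img2img-style handlers above, which never pass the
# pipeline's paired prompts. Argument names follow the diffusers CycleDiffusion
# docs (older releases used init_image instead of image); the file names and
# prompts below are placeholder assumptions, and the block is kept commented
# out (like the other inactive snippets in this file) so importing the module
# does not trigger a generation run.
#
# init_image = Image.open("input.png").convert("RGB").resize((512, 512))
# edited = pipe(
#     prompt="A painting of a black cat",         # what the edited image should show
#     source_prompt="A painting of a white cat",  # what the input image shows
#     image=init_image,
#     strength=0.8,
#     num_inference_steps=100,
#     guidance_scale=2.0,
#     source_guidance_scale=1.0,
#     eta=0.1,
# ).images[0]
# edited.save("edited.png")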