import gradio as gr
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
import torch

model_id = "runwayml/stable-diffusion-v1-5"
controlnet_id = "lllyasviel/control_v11p_sd15_openpose"

# Load the OpenPose ControlNet and the Stable Diffusion pipeline in float32 for CPU inference.
controlnet = ControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float32)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    model_id,
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float32,
)
pipe = pipe.to("cpu")

# Attention slicing trades a little speed for a lower peak memory footprint.
pipe.enable_attention_slicing()
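# Optional: the diffusers ControlNet examples commonly swap in UniPCMultistepScheduler,
# which tends to give good results with fewer denoising steps and so helps on CPU.
# A sketch, left commented out because enabling it changes the sampler the demo uses:
# from diffusers import UniPCMultistepScheduler
# pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)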
def generate_image(prompt, control_image, num_inference_steps=25, guidance_scale=7.5, controlnet_conditioning_scale=1.0):
    """
    Generate an image using the ControlNet pipeline.

    Args:
        prompt (str): Text prompt for image generation.
        control_image (PIL.Image): Control image (e.g. an OpenPose skeleton) that guides generation.
        num_inference_steps (int): Number of denoising steps.
        guidance_scale (float): Classifier-free guidance scale.
        controlnet_conditioning_scale (float): How strongly to condition on the control image.

    Returns:
        PIL.Image: The generated image.
    """
    result = pipe(
        prompt=prompt,
        image=control_image,
        num_inference_steps=int(num_inference_steps),
        guidance_scale=guidance_scale,
        # diffusers expects a plain float here when a single ControlNet is used.
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
    )
    return result.images[0]
with gr.Blocks() as demo:
    gr.Markdown(
        "# ControlNet Image Generator on CPU\n"
        "This demo uses a ControlNet pipeline (OpenPose variant) with Stable Diffusion to generate "
        "images guided by a control image. Note: running on CPU can be slow!"
    )
    with gr.Row():
        prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your image prompt here", value="A futuristic cityscape at dusk")
    with gr.Row():
        control_image_input = gr.Image(label="Control Image", type="pil")
        output_image = gr.Image(label="Generated Image", type="pil")
    with gr.Row():
        num_steps = gr.Slider(minimum=10, maximum=50, value=25, step=1, label="Inference Steps")
        guidance = gr.Slider(minimum=1.0, maximum=15.0, value=7.5, step=0.5, label="Guidance Scale")
        control_scale = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="ControlNet Conditioning Scale")
    generate_btn = gr.Button("Generate Image")

    # Wire the button to generate_image; the inputs map to its arguments in order.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt_input, control_image_input, num_steps, guidance, control_scale],
        outputs=output_image,
    )

demo.launch()
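The OpenPose ControlNet conditions on a pose map rather than a raw photo, so the control image uploaded to the demo should already be a detected skeleton. A minimal sketch of preparing one with the controlnet_aux package (assumed to be installed separately) and calling generate_image directly, outside the Gradio UI:

    from controlnet_aux import OpenposeDetector
    from PIL import Image

    openpose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
    photo = Image.open("person.jpg")  # hypothetical input photo
    pose_map = openpose(photo)        # PIL image of the detected pose skeleton
    image = generate_image("A futuristic cityscape at dusk", pose_map)
    image.save("output.png")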