import os
import random
import uuid
from typing import Tuple

import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
from huggingface_hub import login
# Log in to Hugging Face with the HF_TOKEN secret (needed for gated or private repos);
# skip the call entirely when no token is configured so startup does not fail.
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(hf_token)
DESCRIPTIONz = """## STABLE IMAGINE 🍺"""

def save_image(img):
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
MAX_SEED = np.iinfo(np.int32).max
if not torch.cuda.is_available():
    DESCRIPTIONz += """
<p>⚠️ Running on CPU. SDXL is unlikely to work well on CPU: generation may take a very long time or fail. For reliable results, duplicate the Space onto GPU hardware (e.g. ZeroGPU with @spaces.GPU). 📍</p>
"""
USE_TORCH_COMPILE = 0
ENABLE_CPU_OFFLOAD = 0
if torch.cuda.is_available():
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")
    if USE_TORCH_COMPILE:
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
else:
    # If CUDA is not available, fall back to CPU (not ideal for SDXL)
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning",
        torch_dtype=torch.float32,  # safer for CPU
        use_safetensors=True,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.to("cpu")
if ENABLE_CPU_OFFLOAD:
    # Optionally offload model components to CPU with accelerate, if set up
    pipe.enable_model_cpu_offload()
LORA_OPTIONS = {
    "Realism (face/character)👦🏻": ("prithivMLmods/Canopus-Realism-LoRA", "Canopus-Realism-LoRA.safetensors", "rlms"),
    "Pixar (art/toons)🙀": ("prithivMLmods/Canopus-Pixar-Art", "Canopus-Pixar-Art.safetensors", "pixar"),
    "Interior Architecture (house/hotel)🏠": ("prithivMLmods/Canopus-Interior-Architecture-0.1", "Canopus-Interior-Architecture-0.1δ.safetensors", "arch"),
    "Fashion Product (wearing/usable)👜": ("prithivMLmods/Canopus-Fashion-Product-Dilation", "Canopus-Fashion-Product-Dilation.safetensors", "fashion"),
    "Minimalistic Image (minimal/detailed)🏞️": ("prithivMLmods/Pegasi-Minimalist-Image-Style", "Pegasi-Minimalist-Image-Style.safetensors", "minimalist"),
    "Modern Clothing (trend/new)👕": ("prithivMLmods/Canopus-Modern-Clothing-Design", "Canopus-Modern-Clothing-Design.safetensors", "mdrnclth"),
    "Animaliea (farm/wild)🫎": ("prithivMLmods/Canopus-Animaliea-Artism", "Canopus-Animaliea-Artism.safetensors", "Animaliea"),
    "Canes Cars (realistic/futurecars)🚘": ("prithivMLmods/Canes-Cars-Model-LoRA", "Canes-Cars-Model-LoRA.safetensors", "car"),
    "Art Minimalistic (paint/semireal)🎨": ("prithivMLmods/Canopus-Art-Medium-LoRA", "Canopus-Art-Medium-LoRA.safetensors", "mdm"),
}
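# Each LORA_OPTIONS entry maps a UI label to (Hub repo id, weight filename, adapter name).
# All adapters are loaded once at startup so the dropdown can switch styles per request
# via set_adapters() without reloading weights.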
for model_name, weight_name, adapter_name in LORA_OPTIONS.values():
    pipe.load_lora_weights(model_name, weight_name=weight_name, adapter_name=adapter_name)

if torch.cuda.is_available():
    pipe.to("cuda")
style_list = [
    {
        "name": "3840 x 2160",
        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
    },
    # Add more style dicts here if needed
]

styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
DEFAULT_STYLE_NAME = "3840 x 2160"

def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    if negative:
        n = n + ", " + negative
    return p.replace("{prompt}", positive), n
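# Assumption: on a ZeroGPU Space the inference function must be wrapped with
# @spaces.GPU so it is allocated a GPU at call time; the `spaces` import above
# suggests that was the intent. The decorator is a no-op outside ZeroGPU.
@spaces.GPU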
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    style_name: str = DEFAULT_STYLE_NAME,
    lora_model: str = "Realism (face/character)👦🏻",
    progress=gr.Progress(track_tqdm=True),
):
    seed = int(randomize_seed_fn(seed, randomize_seed))
    positive_prompt, effective_negative_prompt = apply_style(style_name, prompt, negative_prompt)
    if not use_negative_prompt:
        effective_negative_prompt = ""
    model_name, weight_name, adapter_name = LORA_OPTIONS[lora_model]
    pipe.set_adapters(adapter_name)
    # Drive generation from the (possibly randomized) seed so the returned seed is reproducible
    generator = torch.Generator(device=pipe.device).manual_seed(seed)
    images = pipe(
        prompt=positive_prompt,
        negative_prompt=effective_negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=20,
        num_images_per_prompt=1,
        generator=generator,
        cross_attention_kwargs={"scale": 0.65},
        output_type="pil",
    ).images
    image_paths = [save_image(img) for img in images]
    return image_paths, seed
with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTIONz)
    with gr.Row():
        input_prompt = gr.Textbox(label="Prompt", placeholder="Enter prompt", lines=2)
        use_negative_prompt = gr.Checkbox(label="Use negative prompt?", value=False)
        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt", lines=2)
    with gr.Row():
        randomize_seed = gr.Checkbox(label="Randomize Seed", value=False)
        seed = gr.Number(value=0, label="Seed")
    with gr.Row():
        style_dropdown = gr.Dropdown(label="Image Style", choices=list(styles.keys()), value=DEFAULT_STYLE_NAME)
        lora_dropdown = gr.Dropdown(label="LoRA Model", choices=list(LORA_OPTIONS.keys()), value="Realism (face/character)👦🏻")
    with gr.Row():
        width = gr.Slider(512, 2048, value=1024, step=64, label="Width")
        height = gr.Slider(512, 2048, value=1024, step=64, label="Height")
    with gr.Row():
        guidance_scale = gr.Slider(1.0, 15.0, value=3, step=0.5, label="Guidance Scale")
    output_gallery = gr.Gallery(label="Generated Images", columns=[2], height="auto")
    output_seed = gr.Number(label="Final Seed", interactive=False)
    generate_button = gr.Button("Generate Images")
    generate_button.click(
        fn=generate,
        inputs=[
            input_prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            randomize_seed,
            style_dropdown,
            lora_dropdown,
        ],
        outputs=[output_gallery, output_seed],
    )
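# Note: gr.Progress(track_tqdm=True) relies on the request queue. Gradio 4 enables the
# queue by default; on Gradio 3.x, demo.queue() would need to be called before launch().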
demo.launch()