# easteregg / app.py
import spaces
import gradio as gr
import numpy as np
import random
from pathlib import Path
import urllib.parse
import os
import requests
import logging
import traceback
from diffusers import DiffusionPipeline
import torch
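# Startup: configure logging, fetch the LoRA weights from CIVITAI if they are not
# cached locally, and build the SDXL pipeline with the LoRA attached. Any failure
# here is logged with a traceback and the app aborts.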
try:
logging.basicConfig(level=logging.DEBUG)
SPACER = "\n" + "*" * 50 + "\n" # highlights output in crowded logs
device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "stabilityai/stable-diffusion-xl-base-1.0" # Replace to the model you would like to use
# model_repo_id = "stablediffusionapi/copax-timelessxl-sdxl10"
# Check if file exists, else pull from CIVITAI
civitai_token = os.environ.get("CIVTAI_TOKEN")
lora_path = Path("./lora")
file_name = "RealMessyEaster_v09_exp.safetensors"
file_path = lora_path / file_name
base_url = "https://civitai.com/api/download/models/414396"
params = {"token": civitai_token}
encoded_params = urllib.parse.urlencode(params)
full_url = f"{base_url}?{encoded_params}"
if not lora_path.exists():
lora_path.mkdir(parents=True, exist_ok=True)
logging.info(f"{SPACER}Created path {lora_path}{SPACER}")
if not file_path.exists():
logging.info(
f"{SPACER}File {file_name} does not exist. Downloading {full_url[20:]}.{SPACER}"
)
        response = requests.get(full_url, timeout=120)  # timeout guards against a stalled download
response.raise_for_status() # Raise an error for bad responses
with open(file_path, "wb") as f:
f.write(response.content)
logging.info(f"{SPACER}Download ready.")
else:
logging.info(f"{SPACER}File {file_name} already exists.{SPACER}")
if torch.cuda.is_available():
torch_dtype = torch.float16
logging.info(f"{SPACER}CUDA available, setting dtype to float16{SPACER}")
else:
torch_dtype = torch.float32
logging.info(f"{SPACER}CUDA not available, setting dtype to float32{SPACER}")
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe.load_lora_weights(
lora_path,
weight_name=file_name,
adapter_name="messy_easter",
)
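    # "messy_easter" is only a local adapter label; the effective LoRA strength is set per call below.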
pipe = pipe.to(device)
# image parameters
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
# Layout
TITLE = """<h1><center>🥙 Messy Easter, Everybody! 🥙</center></h1>
<center><h2>This year, let AI hide easter eggs for you.</h2></center></br><p>This SDXL LoRA experiment will place a small number of tiny easter eggs somewhere in the generated image. <strong>Apply. Generate. Have fun searching!</strong> RealMessyEaster is trained on 75 labelled images of single small plastic eggs in a messy surrounding, mostly at the edges. </br></br>Goals:</br>
<ul>
<li>Integrating an easter egg to look for.</li>
<li>Adding "messyness" to the background, to make eggs a little harder to spot.</li>
</ul></br>
Don't forget the trigger words in your prompt: '1easteregg', 'messy'. You can find and download the LoRa <a href='https://civitai.com/models/370927/realmessyeaster'>on civitai.</a></p>"""
PLACEHOLDER = """Describe a scene containing words 'messy' and '1easteregg'"""
except Exception as e:
logging.error(
f"{SPACER}Error {e}. Traceback {traceback.format_exc()}{SPACER}\nExiting"
)
    raise SystemExit(1)  # a bare "exit" is a no-op; actually stop the app on startup failure
@spaces.GPU  # requests ZeroGPU hardware for this function on Hugging Face Spaces
def infer(
prompt,
negative_prompt,
seed,
randomize_seed,
width,
height,
guidance_scale,
num_inference_steps,
progress=gr.Progress(track_tqdm=True),
):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
if "messy" not in prompt:
prompt = f"Messy scene. {prompt}"
logging.info("Triggerword 'messy' added to prompt.")
if "1easteregg" not in prompt:
prompt = f"1easteregg hidden in the scene. {prompt}"
prompt = f"{prompt} Very detailed, 8k, documentary art photography, Martin Parr."
negative_prompt = f"{negative_prompt} Distorted, warped."
generator = torch.Generator().manual_seed(seed)
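    # LoRA strength applied at inference time via cross_attention_kwargs (1.0 = full strength).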
lora_scale = 0.9
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
cross_attention_kwargs={"scale": lora_scale},
width=width,
height=height,
generator=generator,
).images[0]
return image, seed
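# Example prompts shown in the UI; each already contains both trigger words.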
examples = [
"1easteregg hidden in a messy laundry room with piles of laundry.",
"1easteregg hidden in a messy artist’s studio stained with colours.",
"1easteregg hidden in a messy punk band practice room full of instruments.",
"1easteregg hidden in a messy teenager’s bedroom, clothes on the floor.",
"1easteregg hidden in a messy and packed antique store.",
]
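# Gradio UI: a prompt row with a Run button, the result image, an "Advanced Settings"
# accordion (seed, size, guidance, steps), and the example prompts defined above.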
with gr.Blocks(theme=gr.themes.Soft()) as demo:
with gr.Column():
gr.HTML(TITLE)
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder=PLACEHOLDER,
container=False,
)
run_button = gr.Button("Run", scale=0, variant="primary")
result = gr.Image(label="Result", show_label=False)
with gr.Accordion("Advanced Settings", open=False):
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
visible=True,
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=1024, # Replace with defaults that work for your model
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=768, # Replace with defaults that work for your model
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=10.0,
step=0.1,
value=7.0, # Replace with defaults that work for your model
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=40, # Replace with defaults that work for your model
)
gr.Examples(examples=examples, inputs=[prompt])
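    # Run inference when the button is clicked or Enter is pressed in the prompt box.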
gr.on(
triggers=[run_button.click, prompt.submit],
fn=infer,
inputs=[
prompt,
negative_prompt,
seed,
randomize_seed,
width,
height,
guidance_scale,
num_inference_steps,
],
outputs=[result, seed],
)
if __name__ == "__main__":
demo.launch()