# NOTE(review): removed non-source artifacts that were fused into this file
# during extraction (a "File size" line plus git-blame hash and line-number
# tables). They were not Python and broke the module.
from diffusers import StableDiffusionXLPipeline, AutoencoderKL
import torch
import random
import os
#from controlnet_aux import OpenposeDetector
#from diffusers.utils import load_image
import gradio as gr
import gc
# Which checkpoint to load: the "Model" environment variable holds an index
# into model_url_list. Default to "0" (SDXL base) so the script still starts
# when the variable is unset — int(None) would raise TypeError at import time.
model_id = int(os.getenv("Model", "0"))
#stable-diffusion-xl-base-1.0 0 - base model
#Colossus_Project_XL 1 - better people
#Sevenof9_v3_sdxl 2 - nsfw
#JuggernautXL_version5 3 - better faces
#RealVisXL_V2.0 4 - realistic people
#AlbedoBaseXL_v11 5 - realistic people
#BetterThanWords_v20_sdxl 6 - nsfw
#AcornIsSpinning_acornXLV1 7 - nsfw
# Hugging Face repo paths (relative to https://huggingface.co/) for each
# single-file .safetensors checkpoint; indexed by model_id above.
model_url_list = ["stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
                  "Krebzonide/Colossus_Project_XL/blob/main/colossusProjectXLSFW_v202BakedVAE.safetensors",
                  "Krebzonide/Sevenof9_v3_sdxl/blob/main/nsfwSevenof9V3_nsfwSevenof9V3.safetensors",
                  "Krebzonide/JuggernautXL_version5/blob/main/juggernautXL_version5.safetensors",
                  "SG161222/RealVisXL_V2.0/blob/main/RealVisXL_V2.0.safetensors",
                  "Krebzonide/AlbedoBaseXL_v11/blob/main/albedobaseXL_v11.safetensors",
                  "Krebzonide/BetterThanWords_v20_sdxl/blob/main/betterThanWords_v20.safetensors",
                  "Krebzonide/AcornIsSpinning_acornXLV1/blob/main/acornIsSpinning_acornxlV1.safetensors"]
css = """
.btn-green {
background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
border-color: #22c55e !important;
color: #166534 !important;
}
.btn-green:hover {
background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""
def generate(prompt, neg_prompt, samp_steps, guide_scale, batch_size, seed, height, width, progress=gr.Progress(track_tqdm=True)):
    """Run the global SDXL pipeline and return a list for the gallery.

    Args:
        prompt: positive text prompt.
        neg_prompt: negative text prompt.
        samp_steps: number of denoising steps (from a Gradio slider).
        guide_scale: classifier-free guidance scale.
        batch_size: images to generate per prompt.
        seed: RNG seed; any negative value means "pick one at random".
        height, width: output resolution in pixels.
        progress: Gradio progress tracker (mirrors the pipeline's tqdm bar).

    Returns:
        List of (image, caption) tuples as expected by gr.Gallery.
    """
    if seed < 0:
        seed = random.randint(1, 999999)
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        # Gradio sliders can deliver floats; diffusers expects ints here,
        # so cast defensively.
        num_inference_steps=int(samp_steps),
        guidance_scale=guide_scale,
        num_images_per_prompt=int(batch_size),
        height=int(height),
        width=int(width),
        # Seeded generator makes results reproducible for a given seed.
        generator=torch.manual_seed(seed),
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]
def set_base_model(base_model_id):
    """Load one of the SDXL checkpoints and move it to the GPU.

    Args:
        base_model_id: index into the module-level model_url_list.

    Returns:
        A StableDiffusionXLPipeline ready for inference on CUDA.
    """
    # Dedicated VAE repo; its name suggests it exists for fp16 numerical
    # stability with SDXL — presumably required at torch.float16 (unverified).
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    model_url = "https://huggingface.co/" + model_url_list[base_model_id]
    pipe = StableDiffusionXLPipeline.from_single_file(
        model_url,
        torch_dtype=torch.float16,
        variant="fp16",
        vae=vae,
        use_safetensors=True,
        # SECURITY: the original hard-coded a Hugging Face token here, which
        # leaks the credential to anyone who can read this file — that token
        # must be revoked. Read the secret from the environment instead
        # (None is fine for public repos).
        use_auth_token=os.getenv("HF_TOKEN"),
    )
    pipe.to("cuda")
    return pipe
def update_pixel_ratio(height, width):
    """Return the image area in megapixels (1 MP = 1024*1024 px), 3 d.p."""
    one_megapixel = 1 << 20  # 1048576
    area = height * width
    return round(area / one_megapixel, 3)
# Example (prompt, negative_prompt) pairs surfaced in the UI via gr.Examples.
# NOTE(review): several entries are NSFW — consistent with the NSFW-tagged
# checkpoints listed above, but worth confirming this is intended for the
# deployment audience.
examples = [
    ['A serious capybara at work, wearing a suit',
     'low quality'],
    ['a graffiti of a robot serving meals to people',
     'low quality'],
    ['photo of a small cozy modern house in red woods on a mountain, solar panels, garage, driveway, great view, sunshine',
     'red house'],
    ['cinematic photo of a woman sitting at a cafe, 35mm photograph, film, bokeh, professional, 4k, highly detailede',
     'drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly'],
    ['analog film photo of old woman on the streets of london, faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage',
     'painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured'],
    ['nude photo of a 20 year old model in the back seat of a car, detailed face',
     'big boobs'],
    ['nude photo of a 20 year old man, penis and testicles, dick and balls, erection',
     'woman']
]
# UI layout and event wiring. Inputs feed generate(); the height/width
# sliders live-update the read-only "Pixel Ratio" display.
with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            samp_steps = gr.Slider(1, 50, value=20, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 6, value=3, step=0.5, label="Guidance scale")
            batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size")
        with gr.Row():
            height = gr.Slider(label="Height", value=1024, minimum=512, maximum=2048, step=16)
            width = gr.Slider(label="Width", value=1024, minimum=512, maximum=2048, step=16)
        with gr.Row():
            # Non-interactive readout of (height*width)/2^20, kept in sync below.
            pixels = gr.Number(label="Pixel Ratio", value=1, interactive=False)
            # seed = -1 (the default) triggers a random seed inside generate().
            seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
        gallery = gr.Gallery(label="Generated images", height=800)
        ex = gr.Examples(examples=examples, inputs=[prompt, negative_prompt])
    # Generation runs through the queue so the tqdm progress bar streams.
    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, batch_size, seed, height, width], [gallery], queue=True)
    # Pixel-ratio updates are cheap, so they bypass the queue.
    height.change(update_pixel_ratio, [height, width], [pixels], queue=False)
    width.change(update_pixel_ratio, [height, width], [pixels], queue=False)
# Load the configured checkpoint once at startup; generate() reads this
# module-level pipe. (Fixed: the original final line had a stray trailing
# "|" artifact, which was a syntax error.)
pipe = set_base_model(model_id)
demo.launch(debug=True)