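# app.py for a Hugging Face Space: a small Gradio text-to-image demo wrapping a
# diffusers SDXL pipeline. Configuration (model index, NSFW-filter flag) comes
# from the Space's environment variables / secrets.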
from diffusers import AutoPipelineForText2Image, AutoencoderKL
import torch
import random
import os
import gradio as gr

hf_token = os.getenv("HF_TOKEN")
model_id = int(os.getenv("Model"))   # index into model_url_list below
nsfw_filter = int(os.getenv("Safe")) # 1 enables the prompt filter, 0 disables it

# stable-diffusion-xl-base-1.0  0 - base model
# Colossus_Project_XL           1 - better people
# AlbedoBaseXL_v11              2 - realistic
# JuggernautXL_v7               3 - better faces
# RealVisXL_V2.0                4 - better photorealism
model_url_list = [
    "stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
    "Krebzonide/Colossus_Project_XL/blob/main/colossusProjectXLSFW_v202BakedVAE.safetensors",
    "Krebzonide/AlbedoBaseXL_v11/blob/main/albedobaseXL_v11.safetensors",
    "Krebzonide/JuggernautXL_version5/blob/main/juggernautXL_v7Rundiffusion.safetensors",
    "SG161222/RealVisXL_V2.0/blob/main/RealVisXL_V2.0.safetensors",
    "Krebzonide/AcornIsSpinning_acornXLV1/blob/main/acornIsSpinning_acornxlV1.safetensors",
]
naughtyWords = ["nude", "nsfw", "naked", "porn", "boob", "tit", "nipple", "vagina", "pussy", "panties", "underwear", "upskirt", "bottomless", "topless", "petite", "xxx"]

css = """
.btn-green {
  background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
  border-color: #22c55e !important;
  color: #166534 !important;
}
.btn-green:hover {
  background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

def generate(prompt, samp_steps, batch_size, seed, progress=gr.Progress(track_tqdm=True)):
    prompt = prompt.lower()
    neg_prompt = ""
    if nsfw_filter:
        if prompt[:10] == "krebzonide":  # magic prefix bypasses the filter
            prompt = prompt[10:]
        else:
            neg_prompt = "child, nsfw, nipples, nude, underwear"
            for word in naughtyWords:
                if prompt.find(word) >= 0:
                    return None, 58008  # sentinel seed marks a blocked prompt
    if seed < 0:
        seed = random.randint(1, 999999)
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,  # no effect at guidance_scale 0.0, but harmless
        num_inference_steps=samp_steps,
        num_images_per_prompt=batch_size,
        guidance_scale=0.0,  # SDXL-Turbo is meant to run without classifier-free guidance
        generator=torch.manual_seed(seed),
    ).images
    return gr.update(value=[(img, f"Image {i+1}") for i, img in enumerate(images)]), seed

def set_base_model(base_model_id):
    # vae and model_url are prepared for the checkpoints above but are not yet
    # wired into the pipeline; the Space currently always loads SDXL-Turbo.
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    model_url = "https://huggingface.co/" + model_url_list[base_model_id]
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float16,
        variant="fp16",
        #use_auth_token=hf_token
    )
    pipe.to("cuda")
    return pipe
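
# A possible way to actually honor model_url_list, assuming diffusers'
# StableDiffusionXLPipeline.from_single_file can load the checkpoint from a
# direct-download URL (kwarg names vary across diffusers versions). Kept as a
# commented-out sketch so the Space keeps its current SDXL-Turbo behavior:
#
#   from diffusers import StableDiffusionXLPipeline
#   pipe = StableDiffusionXLPipeline.from_single_file(
#       model_url.replace("/blob/", "/resolve/"),  # hypothetical direct link
#       vae=vae,
#       torch_dtype=torch.float16,
#   )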

with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            samp_steps = gr.Slider(1, 30, value=10, step=1, label="Sampling steps")
            batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size", interactive=True)
            seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
            lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)
        gallery = gr.Gallery(show_label=False, preview=True, container=False)
    submit_btn.click(generate, [prompt, samp_steps, batch_size, seed], [gallery, lastSeed], queue=True)

pipe = set_base_model(model_id)
demo.launch(debug=True)