from diffusers import AutoPipelineForText2Image
import torch
import random
import os
import gradio as gr

# Optional Hugging Face token and a "Safe" flag read from the environment.
# "Safe" defaults to "1" (filter enabled) if the variable is not set, so the
# int() conversion cannot fail on a missing value.
hf_token = os.getenv("HF_TOKEN")
nsfw_filter = int(os.getenv("Safe", "1"))

# Prompt keywords blocked while the NSFW filter is active.
naughtyWords = ["nude", "nsfw", "naked", "porn", "boob", "tit", "nipple", "vagina", "pussy", "panties", "underwear", "upskirt", "bottomless", "topless", "petite", "xxx"]

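# Custom CSS giving the Generate button a green gradient look.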
css = """
.btn-green {
  background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
  border-color: #22c55e !important;
  color: #166534 !important;
}
.btn-green:hover {
  background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

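# Generation callback: applies the optional keyword filter, picks a random
# seed when none is given, and runs the SDXL-Turbo pipeline for the batch.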
def generate(prompt, samp_steps, batch_size, seed, progress=gr.Progress(track_tqdm=True)):
    prompt = prompt.lower()
    neg_prompt = ""
    if nsfw_filter:
        # A "krebzonide" prefix bypasses the keyword filter (owner override).
        if prompt[:10] == "krebzonide":
            prompt = prompt[10:]
        else:
            neg_prompt = "child, nsfw, nipples, nude, underwear, naked"
            for word in naughtyWords:
                if word in prompt:
                    return None, 80085  # blocked prompt: no images, sentinel seed
    if seed < 0:
        seed = random.randint(1, 999999)
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,  # only takes effect when guidance_scale > 0
        num_inference_steps=samp_steps,
        num_images_per_prompt=batch_size,
        guidance_scale=0.0,  # SDXL-Turbo runs without classifier-free guidance
        generator=torch.manual_seed(seed),
    ).images
    return gr.update(value=[(img, f"Image {i+1}") for i, img in enumerate(images)]), seed
        
def set_base_model():
    # Load the SDXL-Turbo pipeline in half precision and move it to the GPU.
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float16,
        variant="fp16",
        #use_auth_token=hf_token
    )
    pipe.to("cuda")
    return pipe

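# UI layout: prompt box, generate button, sampling/seed controls, and an image gallery.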
with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            samp_steps = gr.Slider(1, 5, value=1, step=1, label="Sampling steps")
            batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size", interactive=True)
            seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
            lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)
        gallery = gr.Gallery(show_label=False, preview=True, container=False, height=700)
    submit_btn.click(generate, [prompt, samp_steps, batch_size, seed], [gallery, lastSeed], queue=True)
    
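# Load the pipeline once at startup, then launch the Gradio app.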
pipe = set_base_model()
demo.launch(debug=True)