Krebzonide committed
Commit a4ca4f9 · 1 Parent(s): a794409

Update app.py

Files changed (1)
  1. app.py +8 -22
app.py CHANGED
@@ -1,14 +1,7 @@
  from diffusers import AutoPipelineForText2Image
- import torch
  import random
- import os
  import gradio as gr

- hf_token = os.getenv("HF_TOKEN")
- nsfw_filter = int(os.getenv("Safe"))
-
- naughtyWords = ["nude", "nsfw", "naked", "porn", "boob", "tit", "nipple", "vagina", "pussy", "panties", "underwear", "upskirt", "bottomless", "topless", "petite", "xxx"]
-
  css = """
  .btn-green {
      background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
@@ -21,15 +14,6 @@ css = """
  """

  def generate(prompt, samp_steps, batch_size, seed, progress=gr.Progress(track_tqdm=True)):
-     prompt = prompt.lower()
-     if nsfw_filter:
-         if prompt[:10] == "krebzonide":
-             prompt = prompt[10:]
-         else:
-             neg_prompt = neg_prompt + ", child, nsfw, nipples, nude, underwear, naked"
-             for word in naughtyWords:
-                 if prompt.find(word) >= 0:
-                     return None, 80085
      if seed < 0:
          seed = random.randint(1,999999)
      images = pipe(
@@ -45,8 +29,7 @@ def set_base_model():
      pipe = AutoPipelineForText2Image.from_pretrained(
          "stabilityai/sdxl-turbo",
          torch_dtype = torch.float16,
-         variant = "fp16",
-         #use_auth_token=hf_token
+         variant = "fp16"
      )
      pipe.to("cuda")
      return pipe
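
For reference, a minimal sketch of how the fp16 SDXL-Turbo pipeline configured in set_base_model() is typically invoked. The actual pipe(...) call in generate() is truncated in the hunk above, so the argument names and values below (num_inference_steps, guidance_scale=0.0, num_images_per_prompt, generator) are assumptions based on standard SDXL-Turbo usage, not the exact code in app.py. Note that torch is still needed for torch.float16 and the seeded generator even though the commit drops the torch import.

import random

import torch
from diffusers import AutoPipelineForText2Image

# Load SDXL-Turbo in half precision, mirroring set_base_model() above.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.to("cuda")

# Turbo checkpoints are tuned for 1-4 sampling steps and are run without
# classifier-free guidance (guidance_scale=0.0).
seed = random.randint(1, 999999)
images = pipe(
    prompt="a watercolor painting of a lighthouse at dusk",
    num_inference_steps=1,
    guidance_scale=0.0,
    num_images_per_prompt=1,
    generator=torch.Generator("cuda").manual_seed(seed),
).images
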
@@ -55,13 +38,16 @@ with gr.Blocks(css=css) as demo:
      with gr.Column():
          prompt = gr.Textbox(label="Prompt")
          submit_btn = gr.Button("Generate", elem_classes="btn-green")
+
          with gr.Row():
-             samp_steps = gr.Slider(1, 5, value=1, step=1, label="Sampling steps")
-             batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size", interactive=True)
+             sampling_steps = gr.Slider(1, 4, value=1, step=1, label="Sampling steps")
+             batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size")
              seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
              lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)
-         gallery = gr.Gallery(show_label=False, preview=True, container=False, height=700)
-     submit_btn.click(generate, [prompt, samp_steps, batch_size, seed], [gallery, lastSeed], queue=True)
+
+         gallery = gr.Gallery(show_label=False, preview=True, container=False, height=650)
+
+     submit_btn.click(generate, [prompt, sampling_steps, batch_size, seed], [gallery, lastSeed], queue=True)

  pipe = set_base_model()
  demo.launch(debug=True)
 
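To make the reworked UI concrete, here is a sketch of the new Blocks layout and its event wiring. The diffusion call is replaced by a clearly labelled stub so the example runs without a GPU; the point is the contract implied by outputs=[gallery, lastSeed]: generate() must return the image batch for the gallery plus the seed it actually used, which is how the read-only "Last Seed" field lets a result be reproduced.

import random

import gradio as gr

css = """
.btn-green {
    background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
}
"""

def generate(prompt, samp_steps, batch_size, seed, progress=gr.Progress(track_tqdm=True)):
    if seed < 0:
        seed = random.randint(1, 999999)
    # Stub: app.py calls the SDXL-Turbo pipeline here (see the sketch above);
    # this placeholder only preserves the (images, seed) return shape.
    images = []
    return images, seed

with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")

        with gr.Row():
            sampling_steps = gr.Slider(1, 4, value=1, step=1, label="Sampling steps")
            batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size")
            seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
            lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)

        gallery = gr.Gallery(show_label=False, preview=True, container=False, height=650)

    # The four inputs map positionally onto generate()'s parameters, and the
    # two return values land in the gallery and the read-only "Last Seed" box.
    submit_btn.click(generate, [prompt, sampling_steps, batch_size, seed], [gallery, lastSeed], queue=True)

demo.launch(debug=True)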