John6666 committed
Commit b233ebf • 1 Parent: 31cedff

Upload 3 files

Files changed (3):
  1. README.md (+1, -1)
  2. app.py (+14, -12)
  3. multit2i.py (+13, -11)
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🌐🌊
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version: 4.40.0
+sdk_version: 4.42.0
 app_file: app.py
 short_description: Text-to-Image
 license: mit
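(In a Space README, the `sdk_version` front-matter field pins the Gradio build the Space runs on, so this one-line bump upgrades the app from Gradio 4.40.0 to 4.42.0 without touching application code.)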
app.py CHANGED
@@ -1,13 +1,12 @@
 import gradio as gr
 from model import models
-from multit2i import (
-    load_models, infer_fn, infer_rand_fn, save_gallery,
+from multit2i import (load_models, infer_fn, infer_rand_fn, save_gallery,
     change_model, warm_model, get_model_info_md, loaded_models,
     get_positive_prefix, get_positive_suffix, get_negative_prefix, get_negative_suffix,
-    get_recom_prompt_type, set_recom_prompt_preset, get_tag_type,
-)
+    get_recom_prompt_type, set_recom_prompt_preset, get_tag_type)
 
 max_images = 8
+MAX_SEED = 2**32-1
 load_models(models)
 
 css = """
@@ -26,10 +25,13 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
         prompt = gr.Text(label="Prompt", lines=2, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
         neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="")
         with gr.Accordion("Advanced options", open=False):
-            width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-            height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-            steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
-            cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+            with gr.Row():
+                width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+            with gr.Row():
+                steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+            seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
         with gr.Accordion("Recommended Prompt", open=False):
             recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
         with gr.Row():
@@ -81,13 +83,13 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
             img_i = gr.Number(i, visible=False)
             image_num.change(lambda i, n: gr.update(visible = (i < n)), [img_i, image_num], o, show_api=False)
             gen_event = gr.on(triggers=[run_button.click, prompt.submit],
-                fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, l1, l2, l3, l4: infer_fn(m, t1, t2, n1, n2, n3, n4, l1, l2, l3, l4) if (i < n) else None,
-                inputs=[img_i, image_num, model_name, prompt, neg_prompt, height, width, steps, cfg,
+                fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4: infer_fn(m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4) if (i < n) else None,
+                inputs=[img_i, image_num, model_name, prompt, neg_prompt, height, width, steps, cfg, seed,
                     positive_prefix, positive_suffix, negative_prefix, negative_suffix],
                 outputs=[o], queue=True, show_api=False)
             gen_event2 = gr.on(triggers=[random_button.click],
-                fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, l1, l2, l3, l4: infer_rand_fn(m, t1, t2, n1, n2, n3, n4, l1, l2, l3, l4) if (i < n) else None,
-                inputs=[img_i, image_num, model_name, prompt, neg_prompt, height, width, steps, cfg,
+                fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4: infer_rand_fn(m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4) if (i < n) else None,
+                inputs=[img_i, image_num, model_name, prompt, neg_prompt, height, width, steps, cfg, seed,
                     positive_prefix, positive_suffix, negative_prefix, negative_suffix],
                 outputs=[o], queue=True, show_api=False)
             o.change(save_gallery, [o, results], [results, image_files], show_api=False)
 
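The headline change in app.py is the `Seed` slider: `MAX_SEED = 2**32-1` caps it at the largest unsigned 32-bit value (a conventional bound for diffusion seeds), and -1 acts as a "randomize" sentinel. Because `gr.on` passes `inputs` positionally, appending `seed` after `cfg` in each inputs list is mirrored by the extra `n5` parameter in each lambda. A minimal sketch of the sentinel contract, with `normalize_seed` as a hypothetical helper (not in the commit):

```python
MAX_SEED = 2**32 - 1  # largest unsigned 32-bit value, as in app.py

def normalize_seed(seed: int) -> int | None:
    """Hypothetical helper: map the slider's -1 sentinel to None
    ('let the backend randomize') and clamp fixed seeds into range."""
    if seed < 0:
        return None
    return min(int(seed), MAX_SEED)

assert normalize_seed(-1) is None               # randomize
assert normalize_seed(123) == 123               # reproducible
assert normalize_seed(MAX_SEED + 5) == MAX_SEED # clamp out-of-range input
```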
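For readers new to this pattern: each of the `max_images` gallery slots registers the same event with a hidden per-slot index (`img_i`), and the lambda only dispatches to `infer_fn` when that index is below the requested image count. A stripped-down illustration of the gating, outside Gradio:

```python
def make_slot_handler(slot_index: int):
    """One handler per gallery slot; `n` is the user's image count."""
    def handler(n: int, prompt: str):
        if slot_index >= n:
            return None  # slot not in use -> skip inference entirely
        return f"slot {slot_index}: {prompt}"  # stand-in for infer_fn(...)
    return handler

handlers = [make_slot_handler(i) for i in range(8)]  # max_images = 8
print([h(3, "1girl, solo") for h in handlers])       # slots 3..7 -> None
```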
multit2i.py CHANGED
@@ -6,7 +6,7 @@ from huggingface_hub import InferenceClient
 import os
 
 
-HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
 server_timeout = 600
 inference_timeout = 300
 
@@ -354,13 +354,14 @@ def warm_model(model_name: str):
 # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
 def infer_body(client: InferenceClient | gr.Interface, prompt: str, neg_prompt: str | None = None,
                height: int | None = None, width: int | None = None,
-               steps: int | None = None, cfg: int | None = None):
+               steps: int | None = None, cfg: int | None = None, seed: int = -1):
     png_path = "image.png"
     kwargs = {}
     if height is not None and height >= 256: kwargs["height"] = height
     if width is not None and width >= 256: kwargs["width"] = width
     if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
     if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
+    if seed >= 0: kwargs["seed"] = seed
     try:
         if isinstance(client, InferenceClient):
             image = client.text_to_image(prompt=prompt, negative_prompt=neg_prompt, **kwargs, token=HF_TOKEN)
@@ -376,17 +377,18 @@ def infer_body(client: InferenceClient | gr.Interface, prompt: str, neg_prompt:
 
 async def infer(model_name: str, prompt: str, neg_prompt: str | None = None,
                 height: int | None = None, width: int | None = None,
-                steps: int | None = None, cfg: int | None = None,
+                steps: int | None = None, cfg: int | None = None, seed: int = -1,
                 save_path: str | None = None, timeout: float = inference_timeout):
     import random
     noise = ""
-    rand = random.randint(1, 500)
-    for i in range(rand):
-        noise += " "
+    if seed < 0:
+        rand = random.randint(1, 500)
+        for i in range(rand):
+            noise += " "
     model = load_model(model_name)
     if not model: return None
     task = asyncio.create_task(asyncio.to_thread(infer_body, model, f"{prompt} {noise}", neg_prompt,
-                                                 height, width, steps, cfg))
+                                                 height, width, steps, cfg, seed))
     await asyncio.sleep(0)
     try:
         result = await asyncio.wait_for(task, timeout=timeout)
@@ -403,7 +405,7 @@ async def infer(model_name: str, prompt: str, neg_prompt: str | None = None,
 
 
 def infer_fn(model_name: str, prompt: str, neg_prompt: str | None = None, height: int | None = None,
-             width: int | None = None, steps: int | None = None, cfg: int | None = None,
+             width: int | None = None, steps: int | None = None, cfg: int | None = None, seed: int = -1,
              pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], save_path: str | None = None):
     if model_name == 'NA':
         return None
@@ -411,7 +413,7 @@ def infer_fn(model_name: str, prompt: str, neg_prompt: str | None = None, height
         prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
         loop = asyncio.new_event_loop()
         result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, height, width,
-                                               steps, cfg, save_path, inference_timeout))
+                                               steps, cfg, seed, save_path, inference_timeout))
     except (Exception, asyncio.CancelledError) as e:
         print(e)
         print(f"Task aborted: {model_name}")
@@ -422,7 +424,7 @@ def infer_fn(model_name: str, prompt: str, neg_prompt: str | None = None, height
 
 
 def infer_rand_fn(model_name_dummy: str, prompt: str, neg_prompt: str | None = None, height: int | None = None,
-                  width: int | None = None, steps: int | None = None, cfg: int | None = None,
+                  width: int | None = None, steps: int | None = None, cfg: int | None = None, seed: int = -1,
                   pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], save_path: str | None = None):
     import random
     if model_name_dummy == 'NA':
@@ -433,7 +435,7 @@ def infer_rand_fn(model_name_dummy: str, prompt: str, neg_prompt: str | None = N
         prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
         loop = asyncio.new_event_loop()
         result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, height, width,
-                                               steps, cfg, save_path, inference_timeout))
+                                               steps, cfg, seed, save_path, inference_timeout))
     except (Exception, asyncio.CancelledError) as e:
         print(e)
         print(f"Task aborted: {model_name}")
 
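An aside on the token line above: the only change is the trailing comment, and the conditional expression itself is redundant, since `os.environ.get` already returns `None` for a missing key. An equivalent one-liner, using nothing beyond the standard library:

```python
import os

# os.environ.get() returns None when the key is absent, so this behaves
# identically to the conditional form in multit2i.py. The token is only
# required for private or gated models.
HF_TOKEN = os.environ.get("HF_TOKEN")
```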
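In `infer_body`, parameters are forwarded to `client.text_to_image` only when the user set them explicitly (0 and -1 mean "use the model default"); the new `seed` check follows the same pattern. Note that the untouched line `cfg = kwargs["guidance_scale"] = cfg` is a chained assignment whose leading `cfg =` is redundant but harmless. A minimal sketch of the filtering idiom, with simplified non-None defaults:

```python
def build_t2i_kwargs(height=0, width=0, steps=0, cfg=0.0, seed=-1) -> dict:
    """Sketch of the 'only forward explicit settings' idiom used by
    infer_body; 0 and -1 are sentinels for 'backend default'."""
    kwargs = {}
    if height >= 256: kwargs["height"] = height
    if width >= 256: kwargs["width"] = width
    if steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
    if seed >= 0: kwargs["seed"] = seed  # the parameter added by this commit
    return kwargs

assert build_t2i_kwargs() == {}                   # everything defaulted
assert build_t2i_kwargs(seed=42) == {"seed": 42}  # reproducible request
```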
 
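The subtlest hunk is in `infer`: the pre-existing code pads the prompt with a random run of 1-500 spaces, presumably so that repeated identical requests bypass any response caching and yield fresh images. With a fixed seed that padding would defeat reproducibility, so the commit guards it behind `seed < 0`. A stripped-down sketch of the interaction:

```python
import random

def cache_busted_prompt(prompt: str, seed: int = -1) -> str:
    """Sketch: pad the prompt with random whitespace only when the seed
    is randomized; a fixed seed keeps the request byte-identical so the
    same inputs reproduce the same image."""
    noise = " " * random.randint(1, 500) if seed < 0 else ""
    return f"{prompt} {noise}"  # mirrors infer()'s f"{prompt} {noise}"

assert cache_busted_prompt("1girl, solo", seed=42) == "1girl, solo "
assert len(cache_busted_prompt("1girl, solo")) > len("1girl, solo ")
```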
 
 