John6666 committed on
Commit
e3109cf
β€’
1 Parent(s): 466fb15

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +14 -14
  2. app.py +1 -1
  3. multit2i.py +1 -1
README.md CHANGED
@@ -1,14 +1,14 @@
1
- ---
2
- title: Free Multi Models Text-to-Image Demo V3
3
- emoji: 🌐🌊
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 5.4.0
8
- app_file: app.py
9
- short_description: Text-to-Image
10
- license: mit
11
- pinned: true
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ ---
2
+ title: Free Multi Models Text-to-Image Demo V3
3
+ emoji: 🌐🌊
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 5.9.1
8
+ app_file: app.py
9
+ short_description: Text-to-Image
10
+ license: mit
11
+ pinned: true
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -131,5 +131,5 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
131
  .then(translate_to_en, [neg_prompt], [neg_prompt], queue=False, show_api=False)
132
 
133
  #demo.queue(default_concurrency_limit=200, max_size=200)
134
- demo.launch(max_threads=400)
135
  # https://github.com/gradio-app/gradio/issues/6339
 
131
  .then(translate_to_en, [neg_prompt], [neg_prompt], queue=False, show_api=False)
132
 
133
  #demo.queue(default_concurrency_limit=200, max_size=200)
134
+ demo.launch(max_threads=400, ssr_mode=False)
135
  # https://github.com/gradio-app/gradio/issues/6339
multit2i.py CHANGED
@@ -69,7 +69,7 @@ def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="l
69
  for model in model_infos:
70
  if not model.private and not model.gated or HF_TOKEN is not None:
71
  loadable = is_loadable(model.id, force_gpu) if check_status else True
72
- if not_tag and not_tag in model.tags or not loadable: continue
73
  models.append(model.id)
74
  if len(models) == limit: break
75
  return models
 
69
  for model in model_infos:
70
  if not model.private and not model.gated or HF_TOKEN is not None:
71
  loadable = is_loadable(model.id, force_gpu) if check_status else True
72
+ if not_tag and not_tag in model.tags or not loadable or "not-for-all-audiences" in model.tags: continue
73
  models.append(model.id)
74
  if len(models) == limit: break
75
  return models