fffiloni committed on
Commit a41d0ed
1 Parent(s): 8bfdc48

Update app.py

Files changed (1)
  1. app.py +21 -10
app.py CHANGED
@@ -1,4 +1,5 @@
-#@title Setup
+from share_btn import community_icon_html, loading_icon_html, share_js
+
 import os, subprocess
 
 def setup():
@@ -44,20 +45,21 @@ ci = Interrogator(config)
 def inference(image, mode, best_max_flavors):
     image = image.convert('RGB')
     if mode == 'best':
-        return ci.interrogate(image, max_flavors=int(best_max_flavors))
+        return ci.interrogate(image, max_flavors=int(best_max_flavors)), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
     elif mode == 'classic':
-        return ci.interrogate_classic(image)
+        return ci.interrogate_classic(image), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
     else:
-        return ci.interrogate_fast(image)
+        return ci.interrogate_fast(image), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
 
 title = """
-    <div style="text-align: center; max-width: 650px; margin: 0 auto;">
+    <div style="text-align: center; max-width: 500px; margin: 0 auto;">
       <div
         style="
          display: inline-flex;
          align-items: center;
          gap: 0.8rem;
          font-size: 1.75rem;
+         margin-bottom: 10px;
         "
       >
         <h1 style="font-weight: 600; margin-bottom: 7px;">
@@ -65,14 +67,15 @@ title = """
         </h1>
       </div>
       <p style="margin-bottom: 10px;font-size: 16px;font-weight: 100;line-height: 1.5em;">
-        Want to figure out what a good prompt might be to create new images like an existing one? The CLIP Interrogator is here to get you answers!
+        Want to figure out what a good prompt might be to create new images like an existing one?
+        <br />The CLIP Interrogator is here to get you answers!
         <br />This version is specialized for producing nice prompts for use with Stable Diffusion 2.0 using the ViT-H-14 OpenCLIP model!
       </p>
     </div>
 """
 
 article = """
-    <div style="text-align: center; max-width: 650px; margin: 0 auto;">
+    <div style="text-align: center; max-width: 500px; margin: 0 auto;font-size: 94%;">
 
     <p>
     Server busy? You can also run on <a href="https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/open-clip/clip_interrogator.ipynb">Google Colab</a>
@@ -125,11 +128,19 @@ with gr.Blocks(css=css) as block:
     input_image = gr.Image(type='pil', elem_id="input-img")
     mode_input = gr.Radio(['best', 'classic', 'fast'], label='', value='best')
     flavor_input = gr.Number(value=4, label='best mode max flavors')
+
     submit_btn = gr.Button("Submit")
+
     output_text = gr.Textbox(label="Output", elem_id="output-txt")
-
+
+    with gr.Group(elem_id="share-btn-container"):
+        community_icon = gr.HTML(community_icon_html, visible=False)
+        loading_icon = gr.HTML(loading_icon_html, visible=False)
+        share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
+
     gr.HTML(article)
 
-    submit_btn.click(fn=inference, inputs=[input_image,mode_input,flavor_input], outputs=[output_text])
-
+    submit_btn.click(fn=inference, inputs=[input_image,mode_input,flavor_input], outputs=[output_text, share_button, community_icon, loading_icon])
+    share_button.click(None, [], [], _js=share_js)
+
 block.queue(max_size=32).launch(show_api=False)
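Note on the inference() change: each branch now returns the generated prompt plus three gr.update(visible=True) values, one for each extra component listed in the click handler's outputs, so a single Submit both fills the output textbox and reveals the previously hidden share widgets. Below is a minimal sketch of that pattern, assuming Gradio 3.x; the component and function names are illustrative, not taken from app.py:

import gradio as gr

def demo_fn(text):
    # Stand-in for ci.interrogate(...): return one value per output component.
    return f"prompt for: {text}", gr.update(visible=True)

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    share = gr.Button("Share to community", visible=False)  # hidden until a result exists
    go = gr.Button("Submit")
    # Outputs are matched positionally: the string fills `out`, the update reveals `share`.
    go.click(fn=demo_fn, inputs=[inp], outputs=[out, share])

demo.launch()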
 
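Note on the share wiring: share_button.click(None, [], [], _js=share_js) registers a handler with no Python callback; in Gradio 3.x the _js argument runs the given JavaScript in the browser, so the share action defined in share_btn.py (imported at the top of the file, not shown in this diff) happens entirely client-side. A hedged sketch of the same fn=None + _js pattern follows; the clipboard snippet stands in for the real share_js and is illustrative only:

import gradio as gr

# Illustrative stand-in for share_js: copy the output text to the clipboard client-side.
copy_js = """
() => {
  const txt = document.querySelector('#output-txt textarea').value;
  navigator.clipboard.writeText(txt);
}
"""

with gr.Blocks() as demo:
    out = gr.Textbox(label="Output", elem_id="output-txt")
    copy_btn = gr.Button("Copy output")
    # fn=None with _js: the JavaScript runs on click; no Python function is invoked.
    copy_btn.click(None, [], [], _js=copy_js)

demo.launch()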