Spaces: Running on Zero
gokaygokay committed
Commit db519e8 • 1 Parent(s): fb29fff

Update ui_components.py

Files changed: ui_components.py (+6 -6)
ui_components.py CHANGED

@@ -88,16 +88,16 @@ def create_interface():
 
             with gr.Accordion("Image and Caption", open=False):
                 input_image = gr.Image(label="Input Image (optional)")
-                caption_output = gr.Textbox(label="Generated Caption", lines=3)
+                caption_output = gr.Textbox(label="Generated Caption", lines=3, show_copy_button=True)
                 caption_model = gr.Radio(["Florence-2", "Qwen2-VL", "JoyCaption"], label="Caption Model", value="Florence-2")
                 create_caption_button = gr.Button("Create Caption")
                 add_caption_button = gr.Button("Add Caption to Prompt")
 
             with gr.Accordion("Prompt Generation", open=True):
-                output = gr.Textbox(label="Generated Prompt / Input Text", lines=4)
-                t5xxl_output = gr.Textbox(label="T5XXL Output", visible=True)
-                clip_l_output = gr.Textbox(label="CLIP L Output", visible=True)
-                clip_g_output = gr.Textbox(label="CLIP G Output", visible=True)
+                output = gr.Textbox(label="Generated Prompt / Input Text", lines=4, show_copy_button=True)
+                t5xxl_output = gr.Textbox(label="T5XXL Output", visible=True, show_copy_button=True)
+                clip_l_output = gr.Textbox(label="CLIP L Output", visible=True, show_copy_button=True)
+                clip_g_output = gr.Textbox(label="CLIP G Output", visible=True, show_copy_button=True)
 
         with gr.Column(scale=2):
             with gr.Accordion("""Prompt Generation with LLM
@@ -138,7 +138,7 @@ def create_interface():
                 model = gr.Dropdown(label="Model", choices=["meta-llama/Meta-Llama-3.1-70B-Instruct"], value="meta-llama/Meta-Llama-3.1-70B-Instruct")
 
                 generate_text_button = gr.Button("Generate Prompt with LLM")
-                text_output = gr.Textbox(label="Generated Text", lines=10)
+                text_output = gr.Textbox(label="Generated Text", lines=10, show_copy_button=True)
 
     def create_caption(image, model):
         if image is not None:
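The only functional change is adding show_copy_button=True to the six output Textbox components, which renders a copy-to-clipboard icon on each field without affecting the component's value or event wiring. A minimal standalone sketch of the same pattern (illustrative labels and handler, not code from this repo; assumes a Gradio version that supports show_copy_button, roughly 3.40 or later):

import gradio as gr

def echo_prompt(text):
    # Stand-in for the Space's prompt-generation logic (hypothetical).
    return text.upper()

with gr.Blocks() as demo:
    prompt_in = gr.Textbox(label="Input Text", lines=2)
    # show_copy_button=True adds a one-click copy icon to the textbox.
    prompt_out = gr.Textbox(label="Generated Prompt", lines=4, show_copy_button=True)
    gr.Button("Generate").click(echo_prompt, inputs=prompt_in, outputs=prompt_out)

if __name__ == "__main__":
    demo.launch()

Because the flag is purely presentational, the diff touches only keyword arguments and leaves the rest of create_interface() unchanged.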