fancyfeast committed
Commit 5a5956e · 1 Parent(s): 248feb4

Man the chatinterface is weird

Files changed (1)
  1. app.py +8 -3
app.py CHANGED
@@ -75,6 +75,8 @@ assert isinstance(end_of_header_id, int) and isinstance(end_of_turn_id, int)
 def chat_joycaption(message: dict, history, temperature: float, max_new_tokens: int) -> Generator[str, None, None]:
     torch.cuda.empty_cache()
 
+    print(message)
+
     # Prompts are always stripped in training for now
     prompt = message['text'].strip()
 
@@ -134,6 +136,8 @@ def chat_joycaption(message: dict, history, temperature: float, max_new_tokens:
     pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
     pixel_values = pixel_values.to(torch.bfloat16)
 
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+
     generate_kwargs = dict(
         input_ids=input_ids,
         pixel_values=pixel_values,
@@ -151,7 +155,6 @@ def chat_joycaption(message: dict, history, temperature: float, max_new_tokens:
     if temperature == 0:
         generate_kwargs["do_sample"] = False
 
-    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
 
@@ -161,7 +164,8 @@ def chat_joycaption(message: dict, history, temperature: float, max_new_tokens:
     yield "".join(outputs)
 
 
-chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
+chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface', type="messages")
+textbox = gr.MultimodalTextbox(file_types=["image"], file_count="single")
 
 with gr.Blocks() as demo:
     gr.HTML(TITLE)
@@ -171,7 +175,8 @@ with gr.Blocks() as demo:
         chatbot=chatbot,
         fill_height=True,
         multimodal=True,
-        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+        textbox=textbox,
+        additional_inputs_accordion=None,#gr.Accordion(label="⚙️ Parameters", open=False, render=False),
         additional_inputs=[
             gr.Slider(minimum=0,
                       maximum=1,
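
For context, here is a minimal, runnable sketch of the ChatInterface wiring this commit settles on. The `gr.Chatbot`, `gr.MultimodalTextbox`, and `gr.ChatInterface` arguments mirror the diff; the echo-style `chat_fn`, placeholder string, and slider ranges are illustrative stand-ins (the real `chat_joycaption` streams tokens from the JoyCaption model via a `TextIteratorStreamer` and a background `model.generate` thread), and a reasonably recent Gradio install is assumed:

```python
# A stripped-down stand-in for app.py's UI wiring: an echo handler replaces the
# JoyCaption model, but the Chatbot / MultimodalTextbox / ChatInterface setup is the same.
from typing import Generator

import gradio as gr


def chat_fn(message: dict, history, temperature: float, max_new_tokens: int) -> Generator[str, None, None]:
    # With multimodal=True, Gradio delivers message as {"text": "...", "files": [...]}.
    prompt = message["text"].strip()
    reply = f"({len(message.get('files', []))} file(s), T={temperature}, max={max_new_tokens}) You said: {prompt}"
    # Yield the accumulated reply so far, mirroring how the real handler drains its
    # TextIteratorStreamer and yields "".join(outputs) after each chunk.
    partial = ""
    for ch in reply:
        partial += ch
        yield partial


# type="messages" switches the chat history to openai-style {"role": ..., "content": ...} dicts.
chatbot = gr.Chatbot(height=450, placeholder="Upload an image and ask for a caption.",
                     label="Gradio ChatInterface", type="messages")
# A single image per message, matching the captioning use case.
textbox = gr.MultimodalTextbox(file_types=["image"], file_count="single")

with gr.Blocks() as demo:
    gr.ChatInterface(
        fn=chat_fn,
        chatbot=chatbot,
        textbox=textbox,
        fill_height=True,
        multimodal=True,
        additional_inputs_accordion=None,
        additional_inputs=[
            gr.Slider(minimum=0, maximum=1, step=0.05, value=0.6, label="Temperature"),
            gr.Slider(minimum=1, maximum=1024, step=1, value=256, label="Max New Tokens"),
        ],
    )

if __name__ == "__main__":
    demo.launch()
```

With `multimodal=True`, `gr.ChatInterface` hands the handler a dict with `"text"` and `"files"` keys rather than a plain string, which is why `chat_joycaption` reads `message['text']` and why the temporary `print(message)` is handy while debugging the interface. The `streamer` construction also moves above the `generate_kwargs` dict, presumably so it is already defined when the kwargs for the background `model.generate` call are assembled.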