Update app.py

app.py CHANGED
@@ -14,54 +14,20 @@ import spaces
 
 PLACEHOLDER = """
 <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-   <img src="https://
-   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">
-   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">
+   <img src="https://raw.githubusercontent.com/huggingface/blog/09dbdfd196a3112ecbb533fc0b6c700571cbc753/assets/179_falcon2-11b/thumbnail.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
+   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Falcon2-11B-VLM</h1>
+   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Falcon2-11B-VLM is an 11B parameters causal decoder-only model built by TII</p>
 </div>
 """
-#####################
-
-'''processor = LlavaNextProcessor.from_pretrained("tiiuae/falcon-11B-vlm", tokenizer_class='PreTrainedTokenizerFast')
-model = LlavaNextForConditionalGeneration.from_pretrained("tiiuae/falcon-11B-vlm", torch_dtype=torch.bfloat16)
-
-url = "http://images.cocodataset.org/val2017/000000039769.jpg"
-cats_image = Image.open(requests.get(url, stream=True).raw)
-instruction = 'Write a long paragraph about this picture.'
-
-prompt = f"""User:<image>\n{instruction} Falcon:"""
-inputs = processor(prompt, images=cats_image, return_tensors="pt", padding=True).to('cuda:0')
-
-model.to('cuda:0')
-output = model.generate(**inputs, max_new_tokens=256)
-
-
-prompt_length = inputs['input_ids'].shape[1]
-generated_captions = processor.decode(output[0], skip_special_tokens=True).strip()
-
-print(generated_captions)
-'''
-#############################
-
-#model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
 model_id = "tiiuae/falcon-11B-vlm"
 
-#processor = AutoProcessor.from_pretrained(model_id)
 processor = LlavaNextProcessor.from_pretrained("tiiuae/falcon-11B-vlm", tokenizer_class='PreTrainedTokenizerFast')
-
 model = LlavaNextForConditionalGeneration.from_pretrained("tiiuae/falcon-11B-vlm",
                                                           torch_dtype=torch.bfloat16,
                                                           #torch_dtype=torch.float16,
                                                           low_cpu_mem_usage=True,)
 
-#model = LlavaForConditionalGeneration.from_pretrained(
-#    model_id,
-#    torch_dtype=torch.float16,
-#    low_cpu_mem_usage=True,
-#)
-
 model.to("cuda:0")
-#model.generation_config.eos_token_id = 128009
-
 
 @spaces.GPU
 def bot_streaming(message, history):

@@ -125,7 +91,7 @@ with gr.Blocks(fill_height=True, ) as demo:
     title="FalconVLM",
     examples=[{"text": "What is on the flower?", "files": ["./bee.jpg"]},
               {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
-    description="Try [
+    description="Try [tiiuae/falcon-11B-VLM](https://huggingface.co/tiiuae/falcon-11B-vlm). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
     stop_btn="Stop Generation",
     multimodal=True,
     textbox=chat_input,
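For context, the block this commit deletes was a commented-out smoke test of the same model. Cleaned up and made self-contained, it would look roughly like the sketch below; this is a reconstruction, assuming a CUDA GPU with enough memory for the bfloat16 weights, and it drops the unused prompt_length variable from the original.

# Standalone inference sketch reconstructed from the commented-out block
# removed above. Assumes a CUDA device and the transformers LLaVA-NeXT API.
import requests
import torch
from PIL import Image
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration

processor = LlavaNextProcessor.from_pretrained(
    "tiiuae/falcon-11B-vlm", tokenizer_class="PreTrainedTokenizerFast"
)
model = LlavaNextForConditionalGeneration.from_pretrained(
    "tiiuae/falcon-11B-vlm", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
)
model.to("cuda:0")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
cats_image = Image.open(requests.get(url, stream=True).raw)
instruction = "Write a long paragraph about this picture."

# Falcon2-11B-VLM uses the LLaVA-style "User:<image>\n... Falcon:" prompt format.
prompt = f"User:<image>\n{instruction} Falcon:"
inputs = processor(prompt, images=cats_image, return_tensors="pt", padding=True).to("cuda:0")

output = model.generate(**inputs, max_new_tokens=256)
caption = processor.decode(output[0], skip_special_tokens=True).strip()
print(caption)

Running this directly with python is a quick way to sanity-check the checkpoint outside Gradio.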
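The diff cuts off at the signature of bot_streaming, so its body is not shown here. Spaces built on this pattern typically stream tokens through a TextIteratorStreamer driven from a background thread; the sketch below is an assumption along those lines, not the app's actual code, and it reuses the processor and model globals loaded above.

# Hypothetical body for bot_streaming; the real implementation is not part of
# this diff. Reuses the global `processor` and `model` defined earlier.
from threading import Thread

import gradio as gr
import spaces
from PIL import Image
from transformers import TextIteratorStreamer

@spaces.GPU
def bot_streaming(message, history):
    # gr.MultimodalTextbox delivers {"text": str, "files": [file paths]}.
    if not message["files"]:
        raise gr.Error("Please upload an image first.")
    image = Image.open(message["files"][-1])

    prompt = f"User:<image>\n{message['text']} Falcon:"
    inputs = processor(prompt, images=image, return_tensors="pt").to("cuda:0")

    # Generate in a worker thread and yield partial text as tokens arrive.
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=256),
    )
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer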
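Finally, the kwargs in the second hunk (title, examples, description, stop_btn, multimodal, textbox) belong to a gr.ChatInterface built inside the gr.Blocks(fill_height=True) context named in the hunk header. The surrounding code is not part of this diff, so the assembly below is a plausible sketch rather than the app's exact source; the MultimodalTextbox placeholder wording is assumed.

# Sketch of how the second hunk's kwargs plug into the demo (structure assumed).
import gradio as gr

with gr.Blocks(fill_height=True) as demo:
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_types=["image"],
        placeholder="Enter a message or upload an image...",  # assumed wording
    )
    gr.ChatInterface(
        fn=bot_streaming,
        title="FalconVLM",
        examples=[{"text": "What is on the flower?", "files": ["./bee.jpg"]},
                  {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
        description="Try [tiiuae/falcon-11B-VLM](https://huggingface.co/tiiuae/falcon-11B-vlm). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
        stop_btn="Stop Generation",
        multimodal=True,
        textbox=chat_input,
    )

demo.queue()
demo.launch()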