Update app.py

app.py CHANGED

@@ -13,6 +13,9 @@ import gradio as gr
 
 from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
 
+
+### Load the model and helper functions ###
+
 model_path = "Qwen/Qwen2.5-VL-7B-Instruct"
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_path,
                                                            torch_dtype=torch.bfloat16,
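Note: the hunk cuts off mid-call after the torch_dtype argument. A minimal sketch of what the complete load might look like, assuming the usual pairing with the AutoProcessor from the import above; device_map="auto" is an assumption, not something this commit shows:

import torch
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

model_path = "Qwen/Qwen2.5-VL-7B-Instruct"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,  # dtype shown in the diff
    device_map="auto",           # assumption; not visible in the hunk
)
processor = AutoProcessor.from_pretrained(model_path)  # pairs with the model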
@@ -305,7 +308,8 @@ def numpy_to_pil(numpy_array):
 
     return pil_image
 
-
+### Prompt section ###
+
 prompt_thinking = """Outline the bounding box coordinates and names of each unique edible food and drink item and output all the coordinates in JSON format.
 Only outline food and drink items that are edible.
 Ignore hard to see items in the background. Only focus on the foreground.
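Note: the hunk above sits inside numpy_to_pil, whose body the diff does not show. A sketch of an assumed typical implementation, consistent with the return pil_image context line:

import numpy as np
from PIL import Image

def numpy_to_pil(numpy_array):
    # Assumed body: cast to 8-bit and wrap the array in a PIL image.
    pil_image = Image.fromarray(np.uint8(numpy_array))
    return pil_image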
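Note: the new prompt_thinking string asks the model to return bounding boxes as JSON. How the Space wires it to the model is outside this diff; a minimal sketch of the standard Qwen2.5-VL chat-template inference path, with a placeholder image path and generation length:

from PIL import Image

image = Image.open("example.jpg")  # placeholder input, not from the commit
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": prompt_thinking},
    ],
}]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[text], images=[image], return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=512)
# Decode only the newly generated tokens; these should hold the JSON boxes.
answer = processor.batch_decode(
    output_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
)[0]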