Update README.md
Browse files
README.md
CHANGED
@@ -46,29 +46,28 @@ pip install git+https://github.com/huggingface/optimum-intel.git
|
|
46 |
from PIL import Image
|
47 |
import requests
|
48 |
from optimum.intel.openvino import OVModelForVisualCausalLM
|
49 |
-
from transformers import
|
50 |
|
51 |
model_id = "OpenVINO/pixtral-12b-int4-ov"
|
52 |
|
53 |
-
|
54 |
-
|
55 |
ov_model = OVModelForVisualCausalLM.from_pretrained(model_id, trust_remote_code=True)
|
56 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
57 |
|
58 |
url = "https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11"
|
59 |
image = Image.open(requests.get(url, stream=True).raw)
|
60 |
|
61 |
-
inputs = ov_model.preprocess_inputs(text=prompt, image=image, tokenizer=tokenizer, config=ov_model.config)
|
62 |
-
|
63 |
-
generation_args = {
|
64 |
-
"max_new_tokens": 100,
|
65 |
-
"streamer": TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
|
66 |
-
}
|
67 |
|
68 |
-
|
|
|
69 |
|
70 |
-
|
71 |
-
response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True)[0]
|
72 |
|
73 |
```
|
74 |
|
|
|
46 |
from PIL import Image
|
47 |
import requests
|
48 |
from optimum.intel.openvino import OVModelForVisualCausalLM
|
49 |
+
from transformers import AutoProcessor, TextStreamer
|
50 |
|
51 |
model_id = "OpenVINO/pixtral-12b-int4-ov"
|
52 |
|
53 |
+
processor = AutoProcessor.from_pretrained(model_id)
|
|
|
54 |
ov_model = OVModelForVisualCausalLM.from_pretrained(model_id, trust_remote_code=True)
|
55 |
+
|
56 |
+
question = "What is unusual in this picture?"
|
57 |
+
messages = [
|
58 |
+
{"role": "user", "content": [{"type": "text", "content": question}, {"type": "image"}]},
|
59 |
+
]
|
60 |
+
text = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
|
61 |
+
|
62 |
|
63 |
url = "https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11"
|
64 |
image = Image.open(requests.get(url, stream=True).raw)
|
65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
66 |
|
67 |
+
inputs = processor(text=text, images=[image], return_tensors="pt")
|
68 |
+
streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
|
69 |
|
70 |
+
output = ov_model.generate(**inputs, do_sample=False, max_new_tokens=100, temperature=None, top_p=None, streamer=streamer)
|
|
|
71 |
|
72 |
```
|
73 |
|