Update README.md
README.md (CHANGED)
@@ -34,32 +34,34 @@ This model was obtained by quantizing the weights of [google/gemma-3-4b-it](https://huggingface.co/google/gemma-3-4b-it)
 This model can be deployed efficiently using the [vLLM](https://docs.vllm.ai/en/latest/) backend, as shown in the example below.
 
 ```python
-from vllm.assets.image import ImageAsset
 from vllm import LLM, SamplingParams
+from vllm.assets.image import ImageAsset
+from transformers import AutoProcessor
 
-#
-llm = LLM(
-    model="nm-testing/gemma-3-4b-it-quantized.w4a16",
-    trust_remote_code=True,
-    max_model_len=4096,
-    max_num_seqs=2,
-)
+# Define model name once
+model_name = "RedHatAI/gemma-3-4b-it-quantized.w8a8"
 
-#
-
-
-
-
-
-    },
-}
-
-
-
+# Load image and processor
+image = ImageAsset("cherry_blossom").pil_image.convert("RGB")
+processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
+
+# Build multimodal prompt
+chat = [
+    {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What is the content of this image?"}]},
+    {"role": "assistant", "content": []}
+]
+prompt = processor.apply_chat_template(chat, add_generation_prompt=True)
+
+# Initialize model
+llm = LLM(model=model_name, trust_remote_code=True)
+
+# Run inference
+inputs = {"prompt": prompt, "multi_modal_data": {"image": [image]}}
 outputs = llm.generate(inputs, SamplingParams(temperature=0.2, max_tokens=64))
-
-
-print("
+
+# Display result
+print("RESPONSE:", outputs[0].outputs[0].text)
+
 ```
 
 vLLM also supports OpenAI-compatible serving. See the [documentation](https://docs.vllm.ai/en/latest/) for more details.
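
For the OpenAI-compatible serving path mentioned at the end of the hunk, the sketch below shows one way a client call could look. It is not part of this diff: it assumes the model has been started separately with `vllm serve RedHatAI/gemma-3-4b-it-quantized.w8a8` on vLLM's default port 8000, and the image URL is a placeholder.

```python
# Hedged sketch: query a locally running vLLM OpenAI-compatible server.
# Assumes the server was started beforehand, e.g.:
#   vllm serve RedHatAI/gemma-3-4b-it-quantized.w8a8
from openai import OpenAI

# vLLM exposes an OpenAI-compatible API on port 8000 by default; no real key is required.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="RedHatAI/gemma-3-4b-it-quantized.w8a8",
    messages=[
        {
            "role": "user",
            "content": [
                # Placeholder image URL; replace with a real, reachable image.
                {"type": "image_url", "image_url": {"url": "https://example.com/cherry_blossom.jpg"}},
                {"type": "text", "text": "What is the content of this image?"},
            ],
        }
    ],
    temperature=0.2,
    max_tokens=64,
)
print("RESPONSE:", response.choices[0].message.content)
```

The sampling values mirror the ones used in the offline example above (temperature 0.2, 64 max tokens).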