Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -11,6 +11,14 @@ from transformers import TextIteratorStreamer
 from threading import Thread
 import torch
 
+device = 'cuda'
+model_id = "google/gemma-3-4b-it"
+processor = AutoProcessor.from_pretrained(model_id, use_fast=True, padding_side="left")
+model = Gemma3ForConditionalGeneration.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16
+).to(device).eval()
+
 def process(message, history):
     """Generate the model response in streaming mode given message and history
     """
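The added lines load the instruction-tuned Gemma 3 4B checkpoint once at module import, in bfloat16 on the GPU, alongside a fast processor with left padding. The commit does not show the body of process(), but given the TextIteratorStreamer import visible in the hunk context, a minimal sketch of how the streaming generation might look is shown below. The @spaces.GPU decorator, the message-dict history format, and the generation parameters are assumptions for illustration, not part of the commit.

import spaces  # ZeroGPU helper; assumed since the Space reports "Running on Zero"
import torch
from threading import Thread
from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer

device = 'cuda'
model_id = "google/gemma-3-4b-it"
processor = AutoProcessor.from_pretrained(model_id, use_fast=True, padding_side="left")
model = Gemma3ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16
).to(device).eval()


@spaces.GPU  # assumed: requests a ZeroGPU slice for the duration of the call
def process(message, history):
    """Generate the model response in streaming mode given message and history
    """
    # Assumed history format: OpenAI-style role/content dicts,
    # as produced by gr.ChatInterface(type="messages").
    messages = [
        {"role": turn["role"], "content": [{"type": "text", "text": turn["content"]}]}
        for turn in history
    ]
    messages.append({"role": "user", "content": [{"type": "text", "text": message}]})

    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(device)

    # Run generate() in a background thread and yield text as it arrives.
    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=512, do_sample=False)
    Thread(target=model.generate, kwargs=generation_kwargs).start()

    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial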