Didier committed
Commit c078e68 · verified · 1 Parent(s): 4f091c8

Upload vlm.py

Files changed (1)
  1. vlm.py +125 -0
vlm.py ADDED
@@ -0,0 +1,125 @@
"""
File: vlm.py
Description: Vision language model utility functions.
Author: Didier Guillevic
Date: 2025-03-16
"""

from transformers import AutoProcessor, Gemma3ForConditionalGeneration
from transformers import TextIteratorStreamer
from threading import Thread
import torch

#
# Load the model: google/gemma-3-4b-it
#
device = 'mps'  # Apple Silicon (Metal) backend
model_id = "google/gemma-3-4b-it"
processor = AutoProcessor.from_pretrained(model_id, use_fast=True, padding_side="left")
model = Gemma3ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16
).to(device).eval()
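
# The device above is hardcoded to 'mps' (Apple Silicon only). As a minimal,
# hedged sketch, a portable fallback could look like the helper below;
# pick_device is a hypothetical name, not part of the uploaded file.
def pick_device() -> str:
    """Prefer CUDA, then Apple's Metal backend, else CPU."""
    if torch.cuda.is_available():
        return 'cuda'
    if torch.backends.mps.is_available():
        return 'mps'
    return 'cpu'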

#
# Build messages
#
def build_messages(message: dict, history: list[tuple]):
    """Build messages given message & history from a **multimodal** chat interface.

    Args:
        message: dictionary with keys: 'text', 'files'
        history: list of tuples with (message, response)

    Returns:
        list of messages (to be sent to the model)
    """
    # Get the user's text and list of images
    user_text = message.get("text", "")
    user_images = message.get("files", [])  # list of images

    # Build the message list including history
    messages = []
    combined_user_input = []  # combine images and text found in the same turn
    for user_turn, bot_turn in history:
        if isinstance(user_turn, tuple):  # image input
            image_content = [{"type": "image", "url": image_url} for image_url in user_turn]
            combined_user_input.extend(image_content)
        elif isinstance(user_turn, str):  # text input
            combined_user_input.append({"type": "text", "text": user_turn})
        if combined_user_input and bot_turn:
            messages.append({'role': 'user', 'content': combined_user_input})
            messages.append({'role': 'assistant', 'content': [{"type": "text", "text": bot_turn}]})
            combined_user_input = []  # reset the combined user input

    # Build the user message's content from the provided message
    user_content = []
    if user_text:
        user_content.append({"type": "text", "text": user_text})
    for image in user_images:
        user_content.append({"type": "image", "url": image})

    messages.append({'role': 'user', 'content': user_content})

    return messages
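
# A minimal usage sketch (not part of the uploaded file), assuming a
# Gradio-style history where an image upload arrives as a tuple of file
# paths with no bot reply, followed by a text turn; file names are
# hypothetical.
example_message = {"text": "What is in this photo?", "files": ["photo.jpg"]}
example_history = [
    (("earlier.png",), None),           # image-only user turn
    ("Describe the image.", "A cat."),  # text turn and the bot's reply
]
# build_messages folds the image and text turns into one user message,
# appends the assistant reply, then adds the new user message last:
# [{'role': 'user', ...}, {'role': 'assistant', ...}, {'role': 'user', ...}]
msgs = build_messages(example_message, example_history)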

#
# Streaming response
#
@torch.inference_mode()
def stream_response(messages: list[dict]):
    """Stream the model's response to the chat interface.

    Args:
        messages: list of messages to send to the model
    """
    # Tokenize the chat (text + images) and move it to the model's device
    inputs = processor.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=True,
        return_dict=True, return_tensors="pt"
    ).to(model.device, dtype=torch.bfloat16)

    # Run generation in a background thread; the streamer yields decoded
    # text pieces as they are produced
    streamer = TextIteratorStreamer(
        processor, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=1_024,
        do_sample=False
    )

    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    partial_message = ""
    for new_text in streamer:
        partial_message += new_text
        yield partial_message
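
# A hedged sketch of gluing these helpers to a multimodal chat front end;
# respond is a hypothetical wrapper, not part of the uploaded file.
def respond(message: dict, history: list[tuple]):
    """Build the message list from the UI inputs, then stream the reply."""
    yield from stream_response(build_messages(message, history))

# With Gradio (an assumption about the intended front end; not imported here):
# import gradio as gr
# gr.ChatInterface(respond, multimodal=True).launch()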


#
# Response (non-streaming)
#
@torch.inference_mode()
def get_response(messages: list[dict]):
    """Get the model's response.

    Args:
        messages: list of messages to send to the model
    """
    # Tokenize the chat (text + images) and move it to the model's device
    inputs = processor.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=True,
        return_dict=True, return_tensors="pt"
    ).to(model.device, dtype=torch.bfloat16)

    input_len = inputs["input_ids"].shape[-1]

    # The decorator already enables inference mode; no extra context needed
    generation = model.generate(**inputs, max_new_tokens=100, do_sample=False)
    generation = generation[0][input_len:]  # keep only the newly generated tokens

    decoded = processor.decode(generation, skip_special_tokens=True)

    return decoded
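
# One-shot usage sketch (hypothetical image file, not part of the upload):
if __name__ == "__main__":
    example = build_messages({"text": "Describe this image.", "files": ["cat.jpg"]}, [])
    print(get_response(example))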