kongzym committed
Commit ec8728a · verified · 1 Parent(s): fb2c858

Upload 16 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
InfiMed.py ADDED
@@ -0,0 +1,522 @@
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import os
from accelerate import PartialState
import PIL.Image  # import the Image submodule explicitly so PIL.Image.Resampling is available below
from transformers import PreTrainedModel, PretrainedConfig, GenerationConfig, AutoTokenizer, LlamaTokenizerFast
from transformers.utils import WEIGHTS_NAME, SAFE_WEIGHTS_NAME
from transformers import Qwen3ForCausalLM, SiglipImageProcessor
from safetensors.torch import load_file
from transformers.modeling_outputs import CausalLMOutputWithPast
from modeling_siglip import SiglipVisionModel
from configuration_siglip import SiglipVisionConfig
from configuration_qwen3 import Qwen3Config
from abc import ABC, abstractmethod
from einops import rearrange


IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200  # placeholder id produced by tokenizer_image_token for "<image>"


class PromptBuilder(ABC):
    def __init__(self, system_prompt: Optional[str] = None) -> None:
        # Only some models define a system prompt => let subclasses handle this logic!
        self.system_prompt = system_prompt

    @abstractmethod
    def add_turn(self, role: str, message: str) -> str: ...

    @abstractmethod
    def get_potential_prompt(self, user_msg: str) -> None: ...

    @abstractmethod
    def get_prompt(self) -> str: ...


class Qwen3PromptBuilder(PromptBuilder):
    def __init__(self, system_prompt: Optional[str] = None) -> None:
        super().__init__(system_prompt)
        self.system_prompt = "<s><|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
        self.bos, self.eos = "<s>", "<|im_end|>"

        # Role-specific "wrap" functions
        self.wrap_human = lambda msg: f"<|im_start|>user\n{msg}<|im_end|>assistant\n"
        self.wrap_gpt = lambda msg: f"{msg if msg != '' else ' '}{self.eos}\n"

        # === `self.prompt` gets built up over multiple turns ===
        self.prompt, self.turn_count = "", 0

    def add_turn(self, role: str, message: str) -> str:
        # assert (role == "human") if (self.turn_count % 2 == 0) else (role == "gpt")
        message = message.strip()  # .replace("<image>", "").strip()

        # Special handling for the "system" prompt (turn_count == 0)
        if self.turn_count == 0:
            sys_message = self.system_prompt + self.wrap_human(message)
            wrapped_message = sys_message
        elif (self.turn_count % 2) == 0:
            human_message = self.wrap_human(message)
            wrapped_message = human_message
        else:
            gpt_message = self.wrap_gpt(message)
            wrapped_message = gpt_message

        # Update prompt
        self.prompt += wrapped_message

        # Bump turn counter
        self.turn_count += 1

        # Return "wrapped_message" (effective string added to context)
        return wrapped_message

    def get_potential_prompt(self, message: str) -> None:
        # Assumes that it's always the user's (human's) turn!
        prompt_copy = str(self.prompt)

        # Special handling for the "system" prompt (turn_count == 0)
        if self.turn_count == 0:
            sys_message = self.system_prompt + self.wrap_human(message)
            prompt_copy += sys_message
        else:
            human_message = self.wrap_human(message)
            prompt_copy += human_message

        # return prompt_copy.removeprefix(self.bos).rstrip()
        return prompt_copy.rstrip()

    def get_prompt(self) -> str:
        # Remove prefix <bos> (if it exists) because it gets auto-inserted by the tokenizer!
        # return self.prompt.removeprefix(self.bos).rstrip()
        return self.prompt.rstrip()


class InfiMedConfig(PretrainedConfig):

    def __init__(
        self,
        vision_config=None,
        llm_config=None,
        run_dir: str = None,
        load_precision: str = "bf16",
        max_length: int = 128,
        temperature: float = 1.0,
        **kwargs
    ):
        if vision_config is None:
            vision_config = {}
            print('vision_config is None. Initializing the SiglipVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['Qwen3ForCausalLM']}
            print('llm_config is None. Initializing the Qwen3Config with default values.')

        self.vision_config = SiglipVisionConfig(**vision_config)
        if llm_config['architectures'][0] == 'Qwen3ForCausalLM':
            self.llm_config = Qwen3Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
        self.run_dir = run_dir
        self.load_precision = load_precision
        self.max_length = max_length
        self.temperature = temperature
        super().__init__(**kwargs)


class AvgPoolProjector(nn.Module):
    def __init__(
        self,
        layer_num: int = 2,
        query_num: int = 144,
        mm_hidden_size: int = 1024,
        llm_hidden_size: int = 4096,
    ):
        super().__init__()
        self.layer_num = layer_num
        self.query_num = query_num
        self.mm_hidden_size = mm_hidden_size
        self.llm_hidden_size = llm_hidden_size
        self.build_net()

    def build_net(self):
        hw = int(self.query_num ** 0.5)
        sampler = nn.AdaptiveAvgPool2d((hw, hw))
        self.sampler = sampler
        modules = [nn.Linear(self.mm_hidden_size, self.llm_hidden_size)]
        for _ in range(1, self.layer_num):
            modules.append(nn.GELU())
            modules.append(nn.Linear(self.llm_hidden_size, self.llm_hidden_size))
        self.mlp_projector = nn.Sequential(*modules)
        print(f"patch size {hw} average pooling layer initialized")

    def forward(self, visual_feat: torch.Tensor) -> torch.Tensor:
        batch_size, seq_len, h_dim = visual_feat.shape
        hw = int(seq_len ** 0.5)
        # Reshape the flat patch sequence back to a 2D grid, pool it, then project to the LLM width.
        shaped_visual_feat = rearrange(visual_feat, "b (h w) d -> b d h w", h=hw, w=hw)
        pooled_visual_feat = self.sampler(shaped_visual_feat)
        reshaped_visual_feat = rearrange(pooled_visual_feat, "b d h w -> b (h w) d")
        output_feat = self.mlp_projector(reshaped_visual_feat)
        return output_feat


class InfiMed(PreTrainedModel):
    config_class = InfiMedConfig

    def __init__(self, config: InfiMedConfig, vision_model=None, language_model=None):
        super().__init__(config)
        self.run_dir = Path(config.run_dir) if config.run_dir else None
        self.model_dtype = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}[config.load_precision]
        self.distributed_state = PartialState()
        self.max_new_tokens = config.max_length
        self.temperature = config.temperature
        self.top_p = config.top_p
        self.repetition_penalty = config.repetition_penalty

        if vision_model is not None:
            self.vision_model = vision_model
        else:
            self.vision_model = SiglipVisionModel.from_pretrained(config.vision_config._name_or_path, hidden_act="gelu")

        if language_model is not None:
            self.language_model = language_model
            self.config.llm_config = language_model.config
        else:
            if config.llm_config.architectures[0] == 'Qwen3ForCausalLM':
                self.language_model = Qwen3ForCausalLM.from_pretrained(
                    config.llm_config._name_or_path,
                    pad_token_id=151670, bos_token_id=128245, eos_token_id=151645,
                    tie_word_embeddings=False,
                )
            else:
                raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')

        self.tokenizer = AutoTokenizer.from_pretrained(config.llm_config._name_or_path, use_fast=True)
        self.tokenizer.add_special_tokens({"additional_special_tokens": ["<|endofchunk|>", "<s>", "<|pad|>"]})
        self.tokenizer.pad_token = "<|pad|>"
        self.tokenizer.bos_token = "<s>"

        self.offset = 1 if self.tokenizer.encode("\n")[0] == self.tokenizer.bos_token_id else 0

        if "finetune" in config.run_dir:
            self.arch_specifier = "full-align+729-avgpool"
        else:
            self.arch_specifier = "no-align+avgpool"

        if self.arch_specifier.split("+")[-1].split("-")[0] != "avgpool":
            query_dim = int(self.arch_specifier.split("+")[-1].split("-")[0])
        else:
            query_dim = 144
        self.projector = AvgPoolProjector(
            query_num=query_dim,
            mm_hidden_size=config.vision_config.hidden_size,
            llm_hidden_size=config.llm_config.hidden_size,
        )

        self.vision_backbone_requires_grad = False

        # Token id of "<|image_pad|>" (see added_tokens.json); image features are spliced into these positions.
        self.img_context_token_id = 151655

        self.image_processor = SiglipImageProcessor.from_pretrained(
            config.vision_config._name_or_path,
            size={"height": 384, "width": 384},
            resample=PIL.Image.Resampling.BICUBIC,
            crop_size={"height": 384, "width": 384},
            do_center_crop=True,
            do_normalize=True,
            image_mean=[0.5, 0.5, 0.5],
            image_std=[0.5, 0.5, 0.5],
            do_convert_rgb=True,
        )

    # Load model weights from a training checkpoint (.safetensors or .pt) stored next to the config.
    @classmethod
    def from_pretrained_ckpt(cls, pretrained_model_name_or_path, *args, **kwargs):
        config = InfiMedConfig.from_pretrained(pretrained_model_name_or_path, *args, **kwargs)
        model = cls(config)
        ckpt_base_path = os.path.join(os.path.dirname(pretrained_model_name_or_path), "checkpoints")
        if (Path(ckpt_base_path) / SAFE_WEIGHTS_NAME).exists():
            state_dict = load_file(Path(ckpt_base_path) / SAFE_WEIGHTS_NAME)
        elif (Path(ckpt_base_path) / WEIGHTS_NAME).exists():
            state_dict = torch.load(Path(ckpt_base_path) / WEIGHTS_NAME, map_location="cpu")["model"]
        elif (Path(ckpt_base_path) / "latest-checkpoint.pt").exists():
            state_dict = torch.load(Path(ckpt_base_path) / "latest-checkpoint.pt", map_location="cpu")["model"]
        else:
            raise FileNotFoundError("No model weights found in the directory.")
        if "vision_backbone" in state_dict:
            model.vision_model.load_state_dict(state_dict["vision_backbone"])

        new_state_dict = {}
        for key, value in state_dict["llm_backbone"].items():
            new_key = key.replace("llm.", "")
            new_state_dict[new_key] = value
        model.language_model.load_state_dict(new_state_dict)
        model.projector.load_state_dict(state_dict["projector"])

        model.to("cuda", dtype=torch.bfloat16)

        model.requires_grad_(False)
        model.eval()
        return model

    def save_checkpoint(self, save_path):
        os.makedirs(save_path, exist_ok=True)
        self.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
        self.image_processor.save_pretrained(save_path)

    def process_messages(self, messages):
        prompt_builder = Qwen3PromptBuilder()
        if "image" in messages:
            processed_prompt = "<image>" + "\n" + messages['prompt'].replace("<image>", '')
        elif "images" in messages:
            processed_prompt = ""
            for i, image in enumerate(messages['images']):
                processed_prompt += f"<image_{i+1}>: "
            processed_prompt += "\n" + messages['prompt'].replace("<image>", '')

        msg = prompt_builder.add_turn("user", processed_prompt)
        msg = msg.strip()

        if isinstance(self.tokenizer, LlamaTokenizerFast):
            msg = msg.rstrip()
        else:
            pass

        turn_input_ids, _ = tokenizer_image_token(msg, self.tokenizer)

        # Expand each "<image>" placeholder (IMAGE_TOKEN_INDEX == -200) into 729 image-context tokens.
        result = []
        for x in turn_input_ids:
            if x == -200:
                result.extend([self.img_context_token_id] * 729)
            else:
                result.append(x)

        turn_input_ids = result

        input_ids = torch.tensor(turn_input_ids)
        input_ids = input_ids[: self.tokenizer.model_max_length]
        input_ids = input_ids.unsqueeze(0)

        if "image" in messages:
            pixel_values = self.image_processor(images=messages["image"], return_tensors="pt")["pixel_values"]
        else:
            pixel_values = None

        input_ids = input_ids.to("cuda")
        pixel_values = pixel_values.to("cuda") if pixel_values is not None else None

        return input_ids, pixel_values

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        image_flags: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> CausalLMOutputWithPast:

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vit_embeds = self.extract_feature(pixel_values)

        input_embeds = self.language_model.get_input_embeddings()(input_ids)

        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        # Replace the embeddings at image-context positions with the projected ViT features.
        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = selected.sum()
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        if attention_mask is None:
            batch_size = input_embeds.shape[0]
            max_len = input_embeds.shape[1]
            attention_mask = torch.zeros((batch_size, max_len), device=input_embeds.device).bool()
            for index in range(batch_size):
                if getattr(self.tokenizer, 'tokenizer_padding_side', 'right') == 'left':
                    attention_mask[index, -max_len:] = True
                else:
                    attention_mask[index, :max_len] = True

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()  # was `CrossEntropyLoss()`, which is never imported in this file
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def extract_feature(self, pixel_values):
        # Take the penultimate hidden state of the SigLIP encoder and project it to the LLM width.
        vit_embeds = self.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=True,
            return_dict=True).hidden_states[-2]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.projector(vit_embeds)
        return vit_embeds

    @torch.no_grad()
    def generate(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        visual_features: Optional[torch.FloatTensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        if attention_mask is None:
            batch_size = input_embeds.shape[0]
            max_len = input_embeds.shape[1]
            attention_mask = torch.zeros((batch_size, max_len), device=input_embeds.device).bool()
            for index in range(batch_size):
                if getattr(self.tokenizer, 'tokenizer_padding_side', 'right') == 'left':
                    attention_mask[index, -max_len:] = True
                else:
                    attention_mask[index, :max_len] = True

        outputs = self.language_model.generate(
            # input_ids=origin_input_ids,
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            # return_dict=return_dict,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs

    @torch.no_grad()
    def generate_output(self, messages):
        input_ids, pixel_values = self.process_messages(messages)
        do_sample = False if self.temperature == 0 else True
        generated_ids = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            temperature=self.temperature,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            max_new_tokens=self.max_new_tokens,
            do_sample=do_sample,
        )
        generated_ids_trimmed = generated_ids
        output_text = self.tokenizer.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        return output_text[0]

    def generate_outputs(self, messages_list):
        res = []
        for messages in messages_list:
            result = self.generate_output(messages)
            res.append(result)
        return res


def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
    prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]

    def insert_separator(X, sep):
        return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]

    input_ids = []
    labels = []
    offset = 0
    if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
        offset = 1
        input_ids.append(prompt_chunks[0][0])
        labels.append(prompt_chunks[0][0])

    for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
        input_ids.extend(x[offset:])

    for x in insert_separator(prompt_chunks, [IGNORE_INDEX] * (offset + 1)):
        labels.extend(x[offset:])

    if return_tensors is not None:
        if return_tensors == 'pt':
            return torch.tensor(input_ids, dtype=torch.long), torch.tensor(labels, dtype=torch.long)
        raise ValueError(f'Unsupported tensor type: {return_tensors}')
    return input_ids, labels
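For orientation, here is a minimal inference sketch built only from the class above, assuming a local run directory laid out the way `from_pretrained_ckpt` expects (a config file with a sibling `checkpoints/` folder) and a CUDA device; the paths and the image file are placeholders:

```python
# Minimal sketch, assuming: config.json sits next to a "checkpoints/" folder
# holding the trained weights, and a CUDA GPU is available. Paths are placeholders.
from PIL import Image

from InfiMed import InfiMed

# Rebuilds the SigLIP + Qwen3 backbones from the config, then loads the
# vision/LLM/projector weights found under <run_dir>/checkpoints/.
model = InfiMed.from_pretrained_ckpt("/path/to/run_dir/config.json")

messages = {
    "prompt": "<image>\nDescribe the key findings in this image.",
    "image": Image.open("example.jpg").convert("RGB"),  # placeholder image
}
print(model.generate_output(messages))
```

`generate_output` tokenizes the prompt with `tokenizer_image_token`, expands each `<image>` placeholder into 729 `<|image_pad|>` tokens, and splices the projected SigLIP features into those positions before calling `language_model.generate`.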
added_tokens.json ADDED
@@ -0,0 +1,30 @@
{
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<think>": 151667,
  "<tool_call>": 151657,
  "<tool_response>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endofchunk|>": 151669,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|pad|>": 151670,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
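These ids line up with the constants hard-coded in InfiMed.py. A quick sanity check, assuming the tokenizer files in this repo load via `AutoTokenizer` (the `"."` path is a placeholder for the repo directory):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # placeholder: path to this repo's tokenizer files
assert tok.convert_tokens_to_ids("<|image_pad|>") == 151655  # img_context_token_id in InfiMed.py
assert tok.convert_tokens_to_ids("<|pad|>") == 151670        # pad_token_id passed to Qwen3ForCausalLM
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645     # eos_token_id
```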
chat_template.jinja ADDED
@@ -0,0 +1,85 @@
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{%- for message in messages %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{%- set content = message.content %}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is defined and message.reasoning_content is not none %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in message.content %}
{%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
{%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- if enable_thinking is defined and enable_thinking is false %}
{{- '<think>\n\n</think>\n\n' }}
{%- endif %}
{%- endif %}
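This is the standard Qwen3 chat format (system/user/assistant turns, optional <think> blocks, and <tool_call> handling). A short sketch of rendering it through the tokenizer, assuming this repo's tokenizer and template are used together; note that InfiMed.py itself builds prompts with `Qwen3PromptBuilder` rather than this template:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # placeholder: path to this repo
messages = [{"role": "user", "content": "What does this chest X-ray show?"}]
prompt = tok.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,  # triggers the empty "<think>\n\n</think>" prefix in the last branch above
)
print(prompt)
```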
config.json ADDED
@@ -0,0 +1,70 @@
{
  "architectures": [
    "InfiMed"
  ],
  "llm_config": {
    "_name_or_path": "/lustre/projects/polyullm/models/Qwen3/Qwen3-4B",
    "architectures": [
      "Qwen3ForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "eos_token_id": 151645,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 2560,
    "initializer_range": 0.02,
    "intermediate_size": 9728,
    "max_position_embeddings": 40960,
    "max_window_layers": 36,
    "model_type": "qwen3",
    "num_attention_heads": 32,
    "num_hidden_layers": 36,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-06,
    "rope_scaling": null,
    "rope_theta": 1000000,
    "sliding_window": null,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "use_cache": true,
    "use_sliding_window": false,
    "vocab_size": 151936
  },
  "load_precision": "bf16",
  "max_length": 32,
  "repetition_penalty": 1.2,
  "run_dir": "qwen3-4b-instruct-continue-training+stage-finetune+x7",
  "seed": 7,
  "stage": "finetune",
  "temperature": 0.0,
  "top_p": 0.0001,
  "torch_dtype": "bfloat16",
  "trackers": [
    "jsonl",
    "wandb"
  ],
  "transformers_version": "4.52.4",
  "vision_config": {
    "_name_or_path": "/home/projects/polyullm/guanghao/train_code/siglip-so400m-patch14-384",
    "architectures": [
      "SiglipModel"
    ],
    "attention_dropout": 0.0,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "image_size": 384,
    "initializer_factor": 1.0,
    "intermediate_size": 4304,
    "layer_norm_eps": 1e-06,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 27,
    "patch_size": 14,
    "torch_dtype": "bfloat16"
  },
  "wandb_entity": null,
  "wandb_project": "mmpretrain"
}
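The vision settings above also explain the hard-coded 729 in InfiMed.py: a 384x384 input split into 14x14 patches gives 27x27 = 729 patch embeddings per image, which is exactly how many `<|image_pad|>` placeholders `process_messages` inserts per `<image>` tag (and the "729" in the `full-align+729-avgpool` arch specifier). A trivial sketch of that arithmetic:

```python
image_size, patch_size = 384, 14               # from vision_config above
patches_per_side = image_size // patch_size    # 27
visual_tokens_per_image = patches_per_side ** 2
print(visual_tokens_per_image)                 # 729
```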
configuration_qwen3.py ADDED
@@ -0,0 +1,212 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Qwen3 model configuration"""
16
+
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.modeling_rope_utils import rope_config_validation
19
+ from transformers.utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class Qwen3Config(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`Qwen3Model`]. It is used to instantiate a
28
+ Qwen3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
29
+ with the defaults will yield a similar configuration to that of
30
+ Qwen3-8B [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B).
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+
36
+ Args:
37
+ vocab_size (`int`, *optional*, defaults to 151936):
38
+ Vocabulary size of the Qwen3 model. Defines the number of different tokens that can be represented by the
39
+ `inputs_ids` passed when calling [`Qwen3Model`]
40
+ hidden_size (`int`, *optional*, defaults to 4096):
41
+ Dimension of the hidden representations.
42
+ intermediate_size (`int`, *optional*, defaults to 22016):
43
+ Dimension of the MLP representations.
44
+ num_hidden_layers (`int`, *optional*, defaults to 32):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 32):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ num_key_value_heads (`int`, *optional*, defaults to 32):
49
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
50
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
51
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
52
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
53
+ by meanpooling all the original heads within that group. For more details checkout [this
54
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
55
+ head_dim (`int`, *optional*, defaults to 128):
56
+ The attention head dimension.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
58
+ The non-linear activation function (function or string) in the decoder.
59
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
60
+ The maximum sequence length that this model might ever be used with.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
69
+ Whether the model's input and output word embeddings should be tied.
70
+ rope_theta (`float`, *optional*, defaults to 10000.0):
71
+ The base period of the RoPE embeddings.
72
+ rope_scaling (`Dict`, *optional*):
73
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
74
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
75
+ accordingly.
76
+ Expected contents:
77
+ `rope_type` (`str`):
78
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
79
+ 'llama3'], with 'default' being the original RoPE implementation.
80
+ `factor` (`float`, *optional*):
81
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
82
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
83
+ original maximum pre-trained length.
84
+ `original_max_position_embeddings` (`int`, *optional*):
85
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
86
+ pretraining.
87
+ `attention_factor` (`float`, *optional*):
88
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
89
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
90
+ `factor` field to infer the suggested value.
91
+ `beta_fast` (`float`, *optional*):
92
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
93
+ ramp function. If unspecified, it defaults to 32.
94
+ `beta_slow` (`float`, *optional*):
95
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
96
+ ramp function. If unspecified, it defaults to 1.
97
+ `short_factor` (`List[float]`, *optional*):
98
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
99
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
100
+ size divided by the number of attention heads divided by 2
101
+ `long_factor` (`List[float]`, *optional*):
102
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<
103
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
104
+ size divided by the number of attention heads divided by 2
105
+ `low_freq_factor` (`float`, *optional*):
106
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
107
+ `high_freq_factor` (`float`, *optional*):
108
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
109
+ attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
110
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
111
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
112
+ Whether to use sliding window attention.
113
+ sliding_window (`int`, *optional*, defaults to 4096):
114
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
115
+ max_window_layers (`int`, *optional*, defaults to 28):
116
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
117
+ attention_dropout (`float`, *optional*, defaults to 0.0):
118
+ The dropout ratio for the attention probabilities.
119
+
120
+ ```python
121
+ >>> from transformers import Qwen3Model, Qwen3Config
122
+
123
+ >>> # Initializing a Qwen3 style configuration
124
+ >>> configuration = Qwen3Config()
125
+
126
+ >>> # Initializing a model from the Qwen3-8B style configuration
127
+ >>> model = Qwen3Model(configuration)
128
+
129
+ >>> # Accessing the model configuration
130
+ >>> configuration = model.config
131
+ ```"""
132
+
133
+ model_type = "qwen3"
134
+ keys_to_ignore_at_inference = ["past_key_values"]
135
+
136
+ # Default tensor parallel plan for base model `Qwen3`
137
+ base_model_tp_plan = {
138
+ "layers.*.self_attn.q_proj": "colwise",
139
+ "layers.*.self_attn.k_proj": "colwise",
140
+ "layers.*.self_attn.v_proj": "colwise",
141
+ "layers.*.self_attn.o_proj": "rowwise",
142
+ "layers.*.mlp.gate_proj": "colwise",
143
+ "layers.*.mlp.up_proj": "colwise",
144
+ "layers.*.mlp.down_proj": "rowwise",
145
+ }
146
+ base_model_pp_plan = {
147
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
148
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
149
+ "norm": (["hidden_states"], ["hidden_states"]),
150
+ }
151
+
152
+ def __init__(
153
+ self,
154
+ vocab_size=151936,
155
+ hidden_size=4096,
156
+ intermediate_size=22016,
157
+ num_hidden_layers=32,
158
+ num_attention_heads=32,
159
+ num_key_value_heads=32,
160
+ head_dim=128,
161
+ hidden_act="silu",
162
+ max_position_embeddings=32768,
163
+ initializer_range=0.02,
164
+ rms_norm_eps=1e-6,
165
+ use_cache=True,
166
+ tie_word_embeddings=False,
167
+ rope_theta=10000.0,
168
+ rope_scaling=None,
169
+ attention_bias=False,
170
+ use_sliding_window=False,
171
+ sliding_window=4096,
172
+ max_window_layers=28,
173
+ attention_dropout=0.0,
174
+ **kwargs,
175
+ ):
176
+ self.vocab_size = vocab_size
177
+ self.max_position_embeddings = max_position_embeddings
178
+ self.hidden_size = hidden_size
179
+ self.intermediate_size = intermediate_size
180
+ self.num_hidden_layers = num_hidden_layers
181
+ self.num_attention_heads = num_attention_heads
182
+ self.use_sliding_window = use_sliding_window
183
+ self.sliding_window = sliding_window # we check `use_sliding_window` in the modeling code
184
+ self.max_window_layers = max_window_layers
185
+
186
+ # for backward compatibility
187
+ if num_key_value_heads is None:
188
+ num_key_value_heads = num_attention_heads
189
+
190
+ self.num_key_value_heads = num_key_value_heads
191
+ self.head_dim = head_dim
192
+ self.hidden_act = hidden_act
193
+ self.initializer_range = initializer_range
194
+ self.rms_norm_eps = rms_norm_eps
195
+ self.use_cache = use_cache
196
+ self.rope_theta = rope_theta
197
+ self.rope_scaling = rope_scaling
198
+ self.attention_bias = attention_bias
199
+ self.attention_dropout = attention_dropout
200
+ # Validate the correctness of rotary position embeddings parameters
201
+ # BC: if there is a 'type' field, move it to 'rope_type'.
202
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
203
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
204
+ rope_config_validation(self)
205
+
206
+ super().__init__(
207
+ tie_word_embeddings=tie_word_embeddings,
208
+ **kwargs,
209
+ )
210
+
211
+
212
+ __all__ = ["Qwen3Config"]
configuration_siglip.py ADDED
@@ -0,0 +1,265 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Siglip model configuration"""
16
+
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class SiglipTextConfig(PretrainedConfig):
25
+ r"""
26
+ This is the configuration class to store the configuration of a [`SiglipTextModel`]. It is used to instantiate a
27
+ Siglip text encoder according to the specified arguments, defining the model architecture. Instantiating a
28
+ configuration with the defaults will yield a similar configuration to that of the text encoder of the Siglip
29
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+ Args:
35
+ vocab_size (`int`, *optional*, defaults to 32000):
36
+ Vocabulary size of the Siglip text model. Defines the number of different tokens that can be represented by
37
+ the `inputs_ids` passed when calling [`SiglipModel`].
38
+ hidden_size (`int`, *optional*, defaults to 768):
39
+ Dimensionality of the encoder layers and the pooler layer.
40
+ intermediate_size (`int`, *optional*, defaults to 3072):
41
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
42
+ num_hidden_layers (`int`, *optional*, defaults to 12):
43
+ Number of hidden layers in the Transformer encoder.
44
+ num_attention_heads (`int`, *optional*, defaults to 12):
45
+ Number of attention heads for each attention layer in the Transformer encoder.
46
+ max_position_embeddings (`int`, *optional*, defaults to 64):
47
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
48
+ just in case (e.g., 512 or 1024 or 2048).
49
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
50
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
51
+ `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
52
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
53
+ The epsilon used by the layer normalization layers.
54
+ attention_dropout (`float`, *optional*, defaults to 0.0):
55
+ The dropout ratio for the attention probabilities.
56
+ pad_token_id (`int`, *optional*, defaults to 1):
57
+ The id of the padding token in the vocabulary.
58
+ bos_token_id (`int`, *optional*, defaults to 49406):
59
+ The id of the beginning-of-sequence token in the vocabulary.
60
+ eos_token_id (`int`, *optional*, defaults to 49407):
61
+ The id of the end-of-sequence token in the vocabulary.
62
+
63
+ Example:
64
+
65
+ ```python
66
+ >>> from transformers import SiglipTextConfig, SiglipTextModel
67
+
68
+ >>> # Initializing a SiglipTextConfig with google/siglip-base-patch16-224 style configuration
69
+ >>> configuration = SiglipTextConfig()
70
+
71
+ >>> # Initializing a SiglipTextModel (with random weights) from the google/siglip-base-patch16-224 style configuration
72
+ >>> model = SiglipTextModel(configuration)
73
+
74
+ >>> # Accessing the model configuration
75
+ >>> configuration = model.config
76
+ ```"""
77
+
78
+ model_type = "siglip_text_model"
79
+ base_config_key = "text_config"
80
+
81
+ def __init__(
82
+ self,
83
+ vocab_size=32000,
84
+ hidden_size=768,
85
+ intermediate_size=3072,
86
+ num_hidden_layers=12,
87
+ num_attention_heads=12,
88
+ max_position_embeddings=64,
89
+ hidden_act="gelu_pytorch_tanh",
90
+ layer_norm_eps=1e-6,
91
+ attention_dropout=0.0,
92
+ # This differs from `CLIPTokenizer`'s default and from openai/siglip
93
+ # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
94
+ pad_token_id=1,
95
+ bos_token_id=49406,
96
+ eos_token_id=49407,
97
+ **kwargs,
98
+ ):
99
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
100
+
101
+ self.vocab_size = vocab_size
102
+ self.hidden_size = hidden_size
103
+ self.intermediate_size = intermediate_size
104
+ self.num_hidden_layers = num_hidden_layers
105
+ self.num_attention_heads = num_attention_heads
106
+ self.max_position_embeddings = max_position_embeddings
107
+ self.layer_norm_eps = layer_norm_eps
108
+ self.hidden_act = hidden_act
109
+ self.attention_dropout = attention_dropout
110
+
111
+
112
+ class SiglipVisionConfig(PretrainedConfig):
113
+ r"""
114
+ This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
115
+ Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
116
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
117
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
118
+
119
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
120
+ documentation from [`PretrainedConfig`] for more information.
121
+
122
+ Args:
123
+ hidden_size (`int`, *optional*, defaults to 768):
124
+ Dimensionality of the encoder layers and the pooler layer.
125
+ intermediate_size (`int`, *optional*, defaults to 3072):
126
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
127
+ num_hidden_layers (`int`, *optional*, defaults to 12):
128
+ Number of hidden layers in the Transformer encoder.
129
+ num_attention_heads (`int`, *optional*, defaults to 12):
130
+ Number of attention heads for each attention layer in the Transformer encoder.
131
+ num_channels (`int`, *optional*, defaults to 3):
132
+ Number of channels in the input images.
133
+ image_size (`int`, *optional*, defaults to 224):
134
+ The size (resolution) of each image.
135
+ patch_size (`int`, *optional*, defaults to 16):
136
+ The size (resolution) of each patch.
137
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
138
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
139
+ `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
140
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
141
+ The epsilon used by the layer normalization layers.
142
+ attention_dropout (`float`, *optional*, defaults to 0.0):
143
+ The dropout ratio for the attention probabilities.
144
+
145
+ Example:
146
+
147
+ ```python
148
+ >>> from transformers import SiglipVisionConfig, SiglipVisionModel
149
+
150
+ >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
151
+ >>> configuration = SiglipVisionConfig()
152
+
153
+ >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
154
+ >>> model = SiglipVisionModel(configuration)
155
+
156
+ >>> # Accessing the model configuration
157
+ >>> configuration = model.config
158
+ ```"""
159
+
160
+ model_type = "siglip_vision_model"
161
+ base_config_key = "vision_config"
162
+
163
+ def __init__(
164
+ self,
165
+ hidden_size=768,
166
+ intermediate_size=3072,
167
+ num_hidden_layers=12,
168
+ num_attention_heads=12,
169
+ num_channels=3,
170
+ image_size=224,
171
+ patch_size=16,
172
+ hidden_act="gelu_pytorch_tanh",
173
+ layer_norm_eps=1e-6,
174
+ attention_dropout=0.0,
175
+ **kwargs,
176
+ ):
177
+ super().__init__(**kwargs)
178
+
179
+ self.hidden_size = hidden_size
180
+ self.intermediate_size = intermediate_size
181
+ self.num_hidden_layers = num_hidden_layers
182
+ self.num_attention_heads = num_attention_heads
183
+ self.num_channels = num_channels
184
+ self.patch_size = patch_size
185
+ self.image_size = image_size
186
+ self.attention_dropout = attention_dropout
187
+ self.layer_norm_eps = layer_norm_eps
188
+ self.hidden_act = hidden_act
189
+
190
+
191
+ class SiglipConfig(PretrainedConfig):
192
+ r"""
193
+ [`SiglipConfig`] is the configuration class to store the configuration of a [`SiglipModel`]. It is used to
194
+ instantiate a Siglip model according to the specified arguments, defining the text model and vision model configs.
195
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Siglip
196
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
197
+
198
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
199
+ documentation from [`PretrainedConfig`] for more information.
200
+
201
+ Args:
202
+ text_config (`dict`, *optional*):
203
+ Dictionary of configuration options used to initialize [`SiglipTextConfig`].
204
+ vision_config (`dict`, *optional*):
205
+ Dictionary of configuration options used to initialize [`SiglipVisionConfig`].
206
+ kwargs (*optional*):
207
+ Dictionary of keyword arguments.
208
+
209
+ Example:
210
+
211
+ ```python
212
+ >>> from transformers import SiglipConfig, SiglipModel
213
+
214
+ >>> # Initializing a SiglipConfig with google/siglip-base-patch16-224 style configuration
215
+ >>> configuration = SiglipConfig()
216
+
217
+ >>> # Initializing a SiglipModel (with random weights) from the google/siglip-base-patch16-224 style configuration
218
+ >>> model = SiglipModel(configuration)
219
+
220
+ >>> # Accessing the model configuration
221
+ >>> configuration = model.config
222
+
223
+ >>> # We can also initialize a SiglipConfig from a SiglipTextConfig and a SiglipVisionConfig
224
+ >>> from transformers import SiglipTextConfig, SiglipVisionConfig
225
+
226
+ >>> # Initializing a SiglipText and SiglipVision configuration
227
+ >>> config_text = SiglipTextConfig()
228
+ >>> config_vision = SiglipVisionConfig()
229
+
230
+ >>> config = SiglipConfig.from_text_vision_configs(config_text, config_vision)
231
+ ```"""
232
+
233
+ model_type = "siglip"
234
+ sub_configs = {"text_config": SiglipTextConfig, "vision_config": SiglipVisionConfig}
235
+
236
+ def __init__(self, text_config=None, vision_config=None, **kwargs):
237
+ super().__init__(**kwargs)
238
+
239
+ if text_config is None:
240
+ text_config = {}
241
+ logger.info("`text_config` is `None`. Initializing the `SiglipTextConfig` with default values.")
242
+
243
+ if vision_config is None:
244
+ vision_config = {}
245
+ logger.info("`vision_config` is `None`. initializing the `SiglipVisionConfig` with default values.")
246
+
247
+ self.text_config = SiglipTextConfig(**text_config)
248
+ self.vision_config = SiglipVisionConfig(**vision_config)
249
+
250
+ self.initializer_factor = 1.0
251
+
252
+ @classmethod
253
+ def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config: SiglipVisionConfig, **kwargs):
254
+ r"""
255
+ Instantiate a [`SiglipConfig`] (or a derived class) from siglip text model configuration and siglip vision
256
+ model configuration.
257
+
258
+ Returns:
259
+ [`SiglipConfig`]: An instance of a configuration object
260
+ """
261
+
262
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
263
+
264
+
265
+ __all__ = ["SiglipConfig", "SiglipTextConfig", "SiglipVisionConfig"]
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1031cef38e5bc62149fd5d1b68bd67d00d7d723a82180c97fd17090c70e63449
size 4966471968
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d3b8d072383149ce00cc8183badf0f5abf42ee7fa2e4a795dcfed5208e18b438
size 4731957576
model.safetensors.index.json ADDED
@@ -0,0 +1,858 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 9698315392
4
+ },
5
+ "weight_map": {
6
+ "language_model.lm_head.weight": "model-00002-of-00002.safetensors",
7
+ "language_model.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
8
+ "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
9
+ "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
10
+ "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
11
+ "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
12
+ "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
13
+ "language_model.model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
14
+ "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
15
+ "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
16
+ "language_model.model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
17
+ "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
18
+ "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
19
+ "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
20
+ "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
21
+ "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
22
+ "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
23
+ "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
24
+ "language_model.model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
25
+ "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
26
+ "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
27
+ "language_model.model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
28
+ "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
29
+ "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
30
+ "language_model.model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
31
+ "language_model.model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
32
+ "language_model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
33
+ "language_model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
34
+ "language_model.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
35
+ "language_model.model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
36
+ "language_model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
37
+ "language_model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
38
+ "language_model.model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
39
+ "language_model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
40
+ "language_model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
41
+ "language_model.model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
42
+ "language_model.model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
43
+ "language_model.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
44
+ "language_model.model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
45
+ "language_model.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
46
+ "language_model.model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
47
+ "language_model.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
48
+ "language_model.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
49
+ "language_model.model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
50
+ "language_model.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
51
+ "language_model.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
52
+ "language_model.model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
53
+ "language_model.model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
54
+ "language_model.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
55
+ "language_model.model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
56
+ "language_model.model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
57
+ "language_model.model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
58
+ "language_model.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
59
+ "language_model.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
60
+ "language_model.model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
61
+ "language_model.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
62
+ "language_model.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
63
+ "language_model.model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
64
+ "language_model.model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
65
+ "language_model.model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
66
+ "language_model.model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
67
+ "language_model.model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
68
+ "language_model.model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
69
+ "language_model.model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
70
+ "language_model.model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
71
+ "language_model.model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
72
+ "language_model.model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
73
+ "language_model.model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
74
+ "language_model.model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
75
+ "language_model.model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
76
+ "language_model.model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
77
+ "language_model.model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
78
+ "language_model.model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
79
+ "language_model.model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
80
+ "language_model.model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
81
+ "language_model.model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
82
+ "language_model.model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
83
+ "language_model.model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
84
+ "language_model.model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
85
+ "language_model.model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
86
+ "language_model.model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
87
+ "language_model.model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
88
+ "language_model.model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
89
+ "language_model.model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
90
+ "language_model.model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
91
+ "language_model.model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
92
+ "language_model.model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
93
+ "language_model.model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
94
+ "language_model.model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
95
+ "language_model.model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
96
+ "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
97
+ "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
98
+ "language_model.model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
99
+ "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
100
+ "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
101
+ "language_model.model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
102
+ "language_model.model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
103
+ "language_model.model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
104
+ "language_model.model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
105
+ "language_model.model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
106
+ "language_model.model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
107
+ "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
108
+ "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
109
+ "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
110
+ "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
111
+ "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
112
+ "language_model.model.layers.17.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
113
+ "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
114
+ "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
115
+ "language_model.model.layers.17.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
116
+ "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
117
+ "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
118
+ "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
119
+ "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
120
+ "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
121
+ "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
122
+ "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
123
+ "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
124
+ "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
125
+ "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
126
+ "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
127
+ "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
128
+ "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
129
+ "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
130
+ "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
131
+ "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
132
+ "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
133
+ "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
134
+ "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
135
+ "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
136
+ "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
137
+ "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
138
+ "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
139
+ "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
140
+ "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
141
+ "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
142
+ "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
143
+ "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
144
+ "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
145
+ "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
146
+ "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
147
+ "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
148
+ "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
149
+ "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
150
+ "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
151
+ "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
152
+ "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
153
+ "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
154
+ "language_model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
155
+ "language_model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
156
+ "language_model.model.layers.20.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
157
+ "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
158
+ "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
159
+ "language_model.model.layers.20.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
160
+ "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
161
+ "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
162
+ "language_model.model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
163
+ "language_model.model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
164
+ "language_model.model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
165
+ "language_model.model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
166
+ "language_model.model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
167
+ "language_model.model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
168
+ "language_model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
169
+ "language_model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
170
+ "language_model.model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
171
+ "language_model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
172
+ "language_model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
173
+ "language_model.model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
174
+ "language_model.model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
175
+ "language_model.model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
176
+ "language_model.model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
177
+ "language_model.model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
178
+ "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
179
+ "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
180
+ "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
181
+ "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
182
+ "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
183
+ "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
184
+ "language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
185
+ "language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
186
+ "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
187
+ "language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
188
+ "language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
189
+ "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
190
+ "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
191
+ "language_model.model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
192
+ "language_model.model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
193
+ "language_model.model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
194
+ "language_model.model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
195
+ "language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
196
+ "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
197
+ "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
198
+ "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
199
+ "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
200
+ "language_model.model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
201
+ "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
202
+ "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
203
+ "language_model.model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
204
+ "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
205
+ "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
206
+ "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
207
+ "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
208
+ "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
209
+ "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
210
+ "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
211
+ "language_model.model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
212
+ "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
213
+ "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
214
+ "language_model.model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
215
+ "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
216
+ "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
217
+ "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
218
+ "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
219
+ "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
220
+ "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
221
+ "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
222
+ "language_model.model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
223
+ "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
224
+ "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
225
+ "language_model.model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
226
+ "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
227
+ "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
228
+ "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
229
+ "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
230
+ "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
231
+ "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
232
+ "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
233
+ "language_model.model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
234
+ "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
235
+ "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
236
+ "language_model.model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
237
+ "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
238
+ "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
239
+ "language_model.model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
240
+ "language_model.model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
241
+ "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
242
+ "language_model.model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
243
+ "language_model.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
244
+ "language_model.model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
245
+ "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
246
+ "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
247
+ "language_model.model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
248
+ "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
249
+ "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
250
+ "language_model.model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
251
+ "language_model.model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
252
+ "language_model.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
253
+ "language_model.model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
254
+ "language_model.model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
255
+ "language_model.model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
256
+ "language_model.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
257
+ "language_model.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
258
+ "language_model.model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
259
+ "language_model.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
260
+ "language_model.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
261
+ "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
262
+ "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
263
+ "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
264
+ "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
265
+ "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
266
+ "language_model.model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
267
+ "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
268
+ "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
269
+ "language_model.model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
270
+ "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
271
+ "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
272
+ "language_model.model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
273
+ "language_model.model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
274
+ "language_model.model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
275
+ "language_model.model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
276
+ "language_model.model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
277
+ "language_model.model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
278
+ "language_model.model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
279
+ "language_model.model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
280
+ "language_model.model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
281
+ "language_model.model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
282
+ "language_model.model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
283
+ "language_model.model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
284
+ "language_model.model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
285
+ "language_model.model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
286
+ "language_model.model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
287
+ "language_model.model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
288
+ "language_model.model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
289
+ "language_model.model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
290
+ "language_model.model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
291
+ "language_model.model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
292
+ "language_model.model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
293
+ "language_model.model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
294
+ "language_model.model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
295
+ "language_model.model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
296
+ "language_model.model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
297
+ "language_model.model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
298
+ "language_model.model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
299
+ "language_model.model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
300
+ "language_model.model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
301
+ "language_model.model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
302
+ "language_model.model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
303
+ "language_model.model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
304
+ "language_model.model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
305
+ "language_model.model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
306
+ "language_model.model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
307
+ "language_model.model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
308
+ "language_model.model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
309
+ "language_model.model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
310
+ "language_model.model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
311
+ "language_model.model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
312
+ "language_model.model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
313
+ "language_model.model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
314
+ "language_model.model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
315
+ "language_model.model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
316
+ "language_model.model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
317
+ "language_model.model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
318
+ "language_model.model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
319
+ "language_model.model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
320
+ "language_model.model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
321
+ "language_model.model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
322
+ "language_model.model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
323
+ "language_model.model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
324
+ "language_model.model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
325
+ "language_model.model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
326
+ "language_model.model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
327
+ "language_model.model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
328
+ "language_model.model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
329
+ "language_model.model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
330
+ "language_model.model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
331
+ "language_model.model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
332
+ "language_model.model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
333
+ "language_model.model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
334
+ "language_model.model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
335
+ "language_model.model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
336
+ "language_model.model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
337
+ "language_model.model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
338
+ "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
339
+ "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
340
+ "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
341
+ "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
342
+ "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
343
+ "language_model.model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
344
+ "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
345
+ "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
346
+ "language_model.model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
347
+ "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
348
+ "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
349
+ "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
350
+ "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
351
+ "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
352
+ "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
353
+ "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
354
+ "language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
355
+ "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
356
+ "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
357
+ "language_model.model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
358
+ "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
359
+ "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
360
+ "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
361
+ "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
362
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
363
+ "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
364
+ "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
365
+ "language_model.model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
366
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
367
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
368
+ "language_model.model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
369
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
370
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
371
+ "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
372
+ "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
373
+ "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
374
+ "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
375
+ "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
376
+ "language_model.model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
377
+ "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
378
+ "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
379
+ "language_model.model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
380
+ "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
381
+ "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
382
+ "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
383
+ "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
384
+ "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
385
+ "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
386
+ "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
387
+ "language_model.model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
388
+ "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
389
+ "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
390
+ "language_model.model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
391
+ "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
392
+ "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
393
+ "language_model.model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
394
+ "language_model.model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
395
+ "language_model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
396
+ "language_model.model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
397
+ "language_model.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
398
+ "language_model.model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
399
+ "language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
400
+ "language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
401
+ "language_model.model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
402
+ "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
403
+ "language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
404
+ "language_model.model.norm.weight": "model-00002-of-00002.safetensors",
405
+ "projector.mlp_projector.0.bias": "model-00002-of-00002.safetensors",
406
+ "projector.mlp_projector.0.weight": "model-00002-of-00002.safetensors",
407
+ "projector.mlp_projector.2.bias": "model-00002-of-00002.safetensors",
408
+ "projector.mlp_projector.2.weight": "model-00002-of-00002.safetensors",
409
+ "vision_model.vision_model.embeddings.patch_embedding.bias": "model-00001-of-00002.safetensors",
410
+ "vision_model.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00002.safetensors",
411
+ "vision_model.vision_model.embeddings.position_embedding.weight": "model-00001-of-00002.safetensors",
412
+ "vision_model.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00002.safetensors",
413
+ "vision_model.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00002.safetensors",
414
+ "vision_model.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00002.safetensors",
415
+ "vision_model.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00002.safetensors",
416
+ "vision_model.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00002.safetensors",
417
+ "vision_model.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00002.safetensors",
418
+ "vision_model.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00002.safetensors",
419
+ "vision_model.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00002.safetensors",
420
+ "vision_model.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
421
+ "vision_model.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
422
+ "vision_model.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
423
+ "vision_model.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
424
+ "vision_model.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
425
+ "vision_model.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
426
+ "vision_model.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
427
+ "vision_model.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
428
+ "vision_model.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00002.safetensors",
429
+ "vision_model.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00002.safetensors",
430
+ "vision_model.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00002.safetensors",
431
+ "vision_model.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00002.safetensors",
432
+ "vision_model.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00002.safetensors",
433
+ "vision_model.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00002.safetensors",
434
+ "vision_model.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00002.safetensors",
435
+ "vision_model.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00002.safetensors",
436
+ "vision_model.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
437
+ "vision_model.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
438
+ "vision_model.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
439
+ "vision_model.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
440
+ "vision_model.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
441
+ "vision_model.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
442
+ "vision_model.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
443
+ "vision_model.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
444
+ "vision_model.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00002.safetensors",
445
+ "vision_model.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00002.safetensors",
446
+ "vision_model.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00002.safetensors",
447
+ "vision_model.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00002.safetensors",
448
+ "vision_model.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00002.safetensors",
449
+ "vision_model.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00002.safetensors",
450
+ "vision_model.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00002.safetensors",
451
+ "vision_model.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00002.safetensors",
452
+ "vision_model.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
453
+ "vision_model.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
454
+ "vision_model.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
455
+ "vision_model.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
456
+ "vision_model.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
457
+ "vision_model.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
458
+ "vision_model.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
459
+ "vision_model.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
460
+ "vision_model.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00002.safetensors",
461
+ "vision_model.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00002.safetensors",
462
+ "vision_model.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00002.safetensors",
463
+ "vision_model.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00002.safetensors",
464
+ "vision_model.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00002.safetensors",
465
+ "vision_model.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00002.safetensors",
466
+ "vision_model.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00002.safetensors",
467
+ "vision_model.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00002.safetensors",
468
+ "vision_model.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
469
+ "vision_model.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
470
+ "vision_model.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
471
+ "vision_model.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
472
+ "vision_model.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
473
+ "vision_model.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
474
+ "vision_model.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
475
+ "vision_model.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
476
+ "vision_model.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00002.safetensors",
477
+ "vision_model.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00002.safetensors",
478
+ "vision_model.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00002.safetensors",
479
+ "vision_model.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00002.safetensors",
480
+ "vision_model.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00002.safetensors",
481
+ "vision_model.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00002.safetensors",
482
+ "vision_model.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00002.safetensors",
483
+ "vision_model.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00002.safetensors",
484
+ "vision_model.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
485
+ "vision_model.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
486
+ "vision_model.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
487
+ "vision_model.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
488
+ "vision_model.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
489
+ "vision_model.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
490
+ "vision_model.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
491
+ "vision_model.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
492
+ "vision_model.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00002.safetensors",
493
+ "vision_model.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00002.safetensors",
494
+ "vision_model.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00002.safetensors",
495
+ "vision_model.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00002.safetensors",
496
+ "vision_model.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00002.safetensors",
497
+ "vision_model.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00002.safetensors",
498
+ "vision_model.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00002.safetensors",
499
+ "vision_model.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00002.safetensors",
500
+ "vision_model.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
501
+ "vision_model.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
502
+ "vision_model.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
503
+ "vision_model.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
504
+ "vision_model.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
505
+ "vision_model.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
506
+ "vision_model.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
507
+ "vision_model.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
508
+ "vision_model.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00002.safetensors",
509
+ "vision_model.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00002.safetensors",
510
+ "vision_model.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00002.safetensors",
511
+ "vision_model.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00002.safetensors",
512
+ "vision_model.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00002.safetensors",
513
+ "vision_model.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00002.safetensors",
514
+ "vision_model.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00002.safetensors",
515
+ "vision_model.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00002.safetensors",
516
+ "vision_model.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
517
+ "vision_model.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
518
+ "vision_model.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
519
+ "vision_model.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
520
+ "vision_model.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
521
+ "vision_model.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
522
+ "vision_model.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
523
+ "vision_model.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
524
+ "vision_model.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00002.safetensors",
525
+ "vision_model.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00002.safetensors",
526
+ "vision_model.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00002.safetensors",
527
+ "vision_model.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00002.safetensors",
528
+ "vision_model.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00002.safetensors",
529
+ "vision_model.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00002.safetensors",
530
+ "vision_model.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00002.safetensors",
531
+ "vision_model.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00002.safetensors",
532
+ "vision_model.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
533
+ "vision_model.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
534
+ "vision_model.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
535
+ "vision_model.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
536
+ "vision_model.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
537
+ "vision_model.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
538
+ "vision_model.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
539
+ "vision_model.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
540
+ "vision_model.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00002.safetensors",
541
+ "vision_model.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00002.safetensors",
542
+ "vision_model.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00002.safetensors",
543
+ "vision_model.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00002.safetensors",
544
+ "vision_model.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00002.safetensors",
545
+ "vision_model.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00002.safetensors",
546
+ "vision_model.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00002.safetensors",
547
+ "vision_model.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00002.safetensors",
548
+ "vision_model.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
549
+ "vision_model.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
550
+ "vision_model.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
551
+ "vision_model.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
552
+ "vision_model.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
553
+ "vision_model.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
554
+ "vision_model.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
555
+ "vision_model.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
556
+ "vision_model.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00002.safetensors",
557
+ "vision_model.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00002.safetensors",
558
+ "vision_model.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00002.safetensors",
559
+ "vision_model.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00002.safetensors",
560
+ "vision_model.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00002.safetensors",
561
+ "vision_model.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00002.safetensors",
562
+ "vision_model.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00002.safetensors",
563
+ "vision_model.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00002.safetensors",
564
+ "vision_model.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
565
+ "vision_model.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
566
+ "vision_model.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
567
+ "vision_model.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
568
+ "vision_model.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
569
+ "vision_model.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
570
+ "vision_model.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
571
+ "vision_model.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
572
+ "vision_model.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00002.safetensors",
573
+ "vision_model.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00002.safetensors",
574
+ "vision_model.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00002.safetensors",
575
+ "vision_model.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00002.safetensors",
576
+ "vision_model.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00002.safetensors",
577
+ "vision_model.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00002.safetensors",
578
+ "vision_model.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00002.safetensors",
579
+ "vision_model.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00002.safetensors",
580
+ "vision_model.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
581
+ "vision_model.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
582
+ "vision_model.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
583
+ "vision_model.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
584
+ "vision_model.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
585
+ "vision_model.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
586
+ "vision_model.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
587
+ "vision_model.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
588
+ "vision_model.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00002.safetensors",
589
+ "vision_model.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00002.safetensors",
590
+ "vision_model.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00002.safetensors",
591
+ "vision_model.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00002.safetensors",
592
+ "vision_model.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00002.safetensors",
593
+ "vision_model.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00002.safetensors",
594
+ "vision_model.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00002.safetensors",
595
+ "vision_model.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00002.safetensors",
596
+ "vision_model.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
597
+ "vision_model.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
598
+ "vision_model.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
599
+ "vision_model.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
600
+ "vision_model.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
601
+ "vision_model.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
602
+ "vision_model.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
603
+ "vision_model.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
604
+ "vision_model.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00002.safetensors",
605
+ "vision_model.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00002.safetensors",
606
+ "vision_model.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00002.safetensors",
607
+ "vision_model.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00002.safetensors",
608
+ "vision_model.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00002.safetensors",
609
+ "vision_model.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00002.safetensors",
610
+ "vision_model.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00002.safetensors",
611
+ "vision_model.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00002.safetensors",
612
+ "vision_model.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
613
+ "vision_model.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
614
+ "vision_model.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
615
+ "vision_model.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
616
+ "vision_model.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
617
+ "vision_model.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
618
+ "vision_model.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
619
+ "vision_model.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
620
+ "vision_model.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00002.safetensors",
621
+ "vision_model.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00002.safetensors",
622
+ "vision_model.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00002.safetensors",
623
+ "vision_model.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00002.safetensors",
624
+ "vision_model.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00002.safetensors",
625
+ "vision_model.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00002.safetensors",
626
+ "vision_model.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00002.safetensors",
627
+ "vision_model.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00002.safetensors",
628
+ "vision_model.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
629
+ "vision_model.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
630
+ "vision_model.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
631
+ "vision_model.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
632
+ "vision_model.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
633
+ "vision_model.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
634
+ "vision_model.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
635
+ "vision_model.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
636
+ "vision_model.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00002.safetensors",
637
+ "vision_model.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00002.safetensors",
638
+ "vision_model.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00002.safetensors",
639
+ "vision_model.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00002.safetensors",
640
+ "vision_model.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00002.safetensors",
641
+ "vision_model.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00002.safetensors",
642
+ "vision_model.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00002.safetensors",
643
+ "vision_model.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00002.safetensors",
644
+ "vision_model.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
645
+ "vision_model.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
646
+ "vision_model.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
647
+ "vision_model.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
648
+ "vision_model.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
649
+ "vision_model.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
650
+ "vision_model.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
651
+ "vision_model.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
652
+ "vision_model.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00002.safetensors",
653
+ "vision_model.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00002.safetensors",
654
+ "vision_model.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00002.safetensors",
655
+ "vision_model.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00002.safetensors",
656
+ "vision_model.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00002.safetensors",
657
+ "vision_model.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00002.safetensors",
658
+ "vision_model.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00002.safetensors",
659
+ "vision_model.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00002.safetensors",
660
+ "vision_model.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
661
+ "vision_model.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
662
+ "vision_model.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
663
+ "vision_model.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
664
+ "vision_model.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
665
+ "vision_model.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
666
+ "vision_model.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
667
+ "vision_model.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
668
+ "vision_model.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00002.safetensors",
669
+ "vision_model.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00002.safetensors",
670
+ "vision_model.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00002.safetensors",
671
+ "vision_model.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00002.safetensors",
672
+ "vision_model.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00002.safetensors",
673
+ "vision_model.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00002.safetensors",
674
+ "vision_model.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00002.safetensors",
675
+ "vision_model.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00002.safetensors",
676
+ "vision_model.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
677
+ "vision_model.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
678
+ "vision_model.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
679
+ "vision_model.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
680
+ "vision_model.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
681
+ "vision_model.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
682
+ "vision_model.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
683
+ "vision_model.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
684
+ "vision_model.vision_model.encoder.layers.24.layer_norm1.bias": "model-00001-of-00002.safetensors",
685
+ "vision_model.vision_model.encoder.layers.24.layer_norm1.weight": "model-00001-of-00002.safetensors",
686
+ "vision_model.vision_model.encoder.layers.24.layer_norm2.bias": "model-00001-of-00002.safetensors",
687
+ "vision_model.vision_model.encoder.layers.24.layer_norm2.weight": "model-00001-of-00002.safetensors",
688
+ "vision_model.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00002.safetensors",
689
+ "vision_model.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00002.safetensors",
690
+ "vision_model.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00002.safetensors",
691
+ "vision_model.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00002.safetensors",
692
+ "vision_model.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
693
+ "vision_model.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
694
+ "vision_model.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
695
+ "vision_model.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
696
+ "vision_model.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
697
+ "vision_model.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
698
+ "vision_model.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
699
+ "vision_model.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
700
+ "vision_model.vision_model.encoder.layers.25.layer_norm1.bias": "model-00001-of-00002.safetensors",
701
+ "vision_model.vision_model.encoder.layers.25.layer_norm1.weight": "model-00001-of-00002.safetensors",
702
+ "vision_model.vision_model.encoder.layers.25.layer_norm2.bias": "model-00001-of-00002.safetensors",
703
+ "vision_model.vision_model.encoder.layers.25.layer_norm2.weight": "model-00001-of-00002.safetensors",
704
+ "vision_model.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00002.safetensors",
705
+ "vision_model.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00002.safetensors",
706
+ "vision_model.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00002.safetensors",
707
+ "vision_model.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00002.safetensors",
708
+ "vision_model.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
709
+ "vision_model.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
710
+ "vision_model.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
711
+ "vision_model.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
712
+ "vision_model.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
713
+ "vision_model.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
714
+ "vision_model.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
715
+ "vision_model.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
716
+ "vision_model.vision_model.encoder.layers.26.layer_norm1.bias": "model-00001-of-00002.safetensors",
717
+ "vision_model.vision_model.encoder.layers.26.layer_norm1.weight": "model-00001-of-00002.safetensors",
718
+ "vision_model.vision_model.encoder.layers.26.layer_norm2.bias": "model-00001-of-00002.safetensors",
719
+ "vision_model.vision_model.encoder.layers.26.layer_norm2.weight": "model-00001-of-00002.safetensors",
720
+ "vision_model.vision_model.encoder.layers.26.mlp.fc1.bias": "model-00001-of-00002.safetensors",
721
+ "vision_model.vision_model.encoder.layers.26.mlp.fc1.weight": "model-00001-of-00002.safetensors",
722
+ "vision_model.vision_model.encoder.layers.26.mlp.fc2.bias": "model-00001-of-00002.safetensors",
723
+ "vision_model.vision_model.encoder.layers.26.mlp.fc2.weight": "model-00001-of-00002.safetensors",
724
+ "vision_model.vision_model.encoder.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
725
+ "vision_model.vision_model.encoder.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
726
+ "vision_model.vision_model.encoder.layers.26.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
727
+ "vision_model.vision_model.encoder.layers.26.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
728
+ "vision_model.vision_model.encoder.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
729
+ "vision_model.vision_model.encoder.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
730
+ "vision_model.vision_model.encoder.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
731
+ "vision_model.vision_model.encoder.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
732
+ "vision_model.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00002.safetensors",
733
+ "vision_model.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00002.safetensors",
734
+ "vision_model.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00002.safetensors",
735
+ "vision_model.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00002.safetensors",
736
+ "vision_model.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00002.safetensors",
737
+ "vision_model.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00002.safetensors",
738
+ "vision_model.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00002.safetensors",
739
+ "vision_model.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00002.safetensors",
740
+ "vision_model.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
741
+ "vision_model.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
742
+ "vision_model.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
743
+ "vision_model.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
744
+ "vision_model.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
745
+ "vision_model.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
746
+ "vision_model.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
747
+ "vision_model.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
748
+ "vision_model.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00002.safetensors",
749
+ "vision_model.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00002.safetensors",
750
+ "vision_model.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00002.safetensors",
751
+ "vision_model.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00002.safetensors",
752
+ "vision_model.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00002.safetensors",
753
+ "vision_model.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00002.safetensors",
754
+ "vision_model.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00002.safetensors",
755
+ "vision_model.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00002.safetensors",
756
+ "vision_model.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
757
+ "vision_model.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
758
+ "vision_model.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
759
+ "vision_model.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
760
+ "vision_model.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
761
+ "vision_model.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
762
+ "vision_model.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
763
+ "vision_model.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
764
+ "vision_model.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00002.safetensors",
765
+ "vision_model.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00002.safetensors",
766
+ "vision_model.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00002.safetensors",
767
+ "vision_model.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00002.safetensors",
768
+ "vision_model.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00002.safetensors",
769
+ "vision_model.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00002.safetensors",
770
+ "vision_model.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00002.safetensors",
771
+ "vision_model.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00002.safetensors",
772
+ "vision_model.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
773
+ "vision_model.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
774
+ "vision_model.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
775
+ "vision_model.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
776
+ "vision_model.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
777
+ "vision_model.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
778
+ "vision_model.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
779
+ "vision_model.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
780
+ "vision_model.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00002.safetensors",
781
+ "vision_model.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00002.safetensors",
782
+ "vision_model.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00002.safetensors",
783
+ "vision_model.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00002.safetensors",
784
+ "vision_model.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00002.safetensors",
785
+ "vision_model.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00002.safetensors",
786
+ "vision_model.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00002.safetensors",
787
+ "vision_model.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00002.safetensors",
788
+ "vision_model.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
789
+ "vision_model.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
790
+ "vision_model.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
791
+ "vision_model.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
792
+ "vision_model.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
793
+ "vision_model.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
794
+ "vision_model.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
795
+ "vision_model.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
796
+ "vision_model.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00002.safetensors",
797
+ "vision_model.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00002.safetensors",
798
+ "vision_model.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00002.safetensors",
799
+ "vision_model.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00002.safetensors",
800
+ "vision_model.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00002.safetensors",
801
+ "vision_model.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00002.safetensors",
802
+ "vision_model.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00002.safetensors",
803
+ "vision_model.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00002.safetensors",
804
+ "vision_model.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
805
+ "vision_model.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
806
+ "vision_model.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
807
+ "vision_model.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
808
+ "vision_model.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
809
+ "vision_model.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
810
+ "vision_model.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
811
+ "vision_model.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
812
+ "vision_model.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00002.safetensors",
813
+ "vision_model.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00002.safetensors",
814
+ "vision_model.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00002.safetensors",
815
+ "vision_model.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00002.safetensors",
816
+ "vision_model.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00002.safetensors",
817
+ "vision_model.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00002.safetensors",
818
+ "vision_model.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00002.safetensors",
819
+ "vision_model.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00002.safetensors",
820
+ "vision_model.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
821
+ "vision_model.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
822
+ "vision_model.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
823
+ "vision_model.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
824
+ "vision_model.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
825
+ "vision_model.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
826
+ "vision_model.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
827
+ "vision_model.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
828
+ "vision_model.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00002.safetensors",
829
+ "vision_model.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00002.safetensors",
830
+ "vision_model.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00002.safetensors",
831
+ "vision_model.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00002.safetensors",
832
+ "vision_model.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00002.safetensors",
833
+ "vision_model.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00002.safetensors",
834
+ "vision_model.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00002.safetensors",
835
+ "vision_model.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00002.safetensors",
836
+ "vision_model.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
837
+ "vision_model.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
838
+ "vision_model.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
839
+ "vision_model.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
840
+ "vision_model.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
841
+ "vision_model.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
842
+ "vision_model.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
843
+ "vision_model.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
844
+ "vision_model.vision_model.head.attention.in_proj_bias": "model-00001-of-00002.safetensors",
845
+ "vision_model.vision_model.head.attention.in_proj_weight": "model-00001-of-00002.safetensors",
846
+ "vision_model.vision_model.head.attention.out_proj.bias": "model-00001-of-00002.safetensors",
847
+ "vision_model.vision_model.head.attention.out_proj.weight": "model-00001-of-00002.safetensors",
848
+ "vision_model.vision_model.head.layernorm.bias": "model-00001-of-00002.safetensors",
849
+ "vision_model.vision_model.head.layernorm.weight": "model-00001-of-00002.safetensors",
850
+ "vision_model.vision_model.head.mlp.fc1.bias": "model-00001-of-00002.safetensors",
851
+ "vision_model.vision_model.head.mlp.fc1.weight": "model-00001-of-00002.safetensors",
852
+ "vision_model.vision_model.head.mlp.fc2.bias": "model-00001-of-00002.safetensors",
853
+ "vision_model.vision_model.head.mlp.fc2.weight": "model-00001-of-00002.safetensors",
854
+ "vision_model.vision_model.head.probe": "model-00001-of-00002.safetensors",
855
+ "vision_model.vision_model.post_layernorm.bias": "model-00001-of-00002.safetensors",
856
+ "vision_model.vision_model.post_layernorm.weight": "model-00001-of-00002.safetensors"
857
+ }
858
+ }
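
For reference, the weight_map above follows the standard sharded-safetensors index layout: each parameter name maps to the shard file that stores it, so a loader only needs to open the shard that actually contains the tensor it wants. A minimal sketch of resolving a single tensor through this index (the local directory name and the helper function are illustrative, not part of the repository):

import json
from safetensors.torch import load_file

def load_single_tensor(repo_dir: str, param_name: str):
    # Look up which shard holds the parameter, then read only that shard.
    with open(f"{repo_dir}/model.safetensors.index.json") as f:
        index = json.load(f)
    shard_file = index["weight_map"][param_name]  # e.g. "model-00001-of-00002.safetensors"
    return load_file(f"{repo_dir}/{shard_file}")[param_name]

# Example: fetch the vision tower's final post-layernorm weight listed above.
weight = load_single_tensor("./checkpoint", "vision_model.vision_model.post_layernorm.weight")
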
modeling_siglip.py ADDED
@@ -0,0 +1,1585 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Siglip model."""
16
+
17
+ import math
18
+ import warnings
19
+ from dataclasses import dataclass
20
+ from typing import Any, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+ from torch.nn.init import _calculate_fan_in_and_fan_out
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
31
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
32
+ from transformers.modeling_utils import PreTrainedModel
33
+ from transformers.utils import (
34
+ ModelOutput,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ is_flash_attn_2_available,
38
+ is_flash_attn_greater_or_equal_2_10,
39
+ logging,
40
+ replace_return_docstrings,
41
+ torch_int,
42
+ )
43
+ from configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig
44
+
45
+
46
+ if is_flash_attn_2_available():
47
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ # General docstring
53
+ _CONFIG_FOR_DOC = "SiglipConfig"
54
+ _CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
55
+
56
+
57
+ def _trunc_normal_(tensor, mean, std, a, b):
58
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
59
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
60
+ def norm_cdf(x):
61
+ # Computes standard normal cumulative distribution function
62
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
63
+
64
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
65
+ warnings.warn(
66
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
67
+ "The distribution of values may be incorrect.",
68
+ stacklevel=2,
69
+ )
70
+
71
+ # Values are generated by using a truncated uniform distribution and
72
+ # then using the inverse CDF for the normal distribution.
73
+ # Get upper and lower cdf values
74
+ l = norm_cdf((a - mean) / std)
75
+ u = norm_cdf((b - mean) / std)
76
+
77
+ # Uniformly fill tensor with values from [l, u], then translate to
78
+ # [2l-1, 2u-1].
79
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
80
+
81
+ # Use inverse cdf transform for normal distribution to get truncated
82
+ # standard normal
83
+ tensor.erfinv_()
84
+
85
+ # Transform to proper mean, std
86
+ tensor.mul_(std * math.sqrt(2.0))
87
+ tensor.add_(mean)
88
+
89
+ # Clamp to ensure it's in the proper range
90
+ tensor.clamp_(min=a, max=b)
91
+
92
+
93
+ def trunc_normal_tf_(
94
+ tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
95
+ ) -> torch.Tensor:
96
+ """Fills the input Tensor with values drawn from a truncated
97
+ normal distribution. The values are effectively drawn from the
98
+ normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)`
99
+ with values outside :math:`[a, b]` redrawn until they are within
100
+ the bounds. The method used for generating the random values works
101
+ best when :math:`a \\leq \text{mean} \\leq b`.
102
+
103
+ NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
104
+ bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
105
+ and the result is subsequently scaled and shifted by the mean and std args.
106
+
107
+ Args:
108
+ tensor: an n-dimensional `torch.Tensor`
109
+ mean: the mean of the normal distribution
110
+ std: the standard deviation of the normal distribution
111
+ a: the minimum cutoff value
112
+ b: the maximum cutoff value
113
+ """
114
+ with torch.no_grad():
115
+ _trunc_normal_(tensor, 0, 1.0, a, b)
116
+ tensor.mul_(std).add_(mean)
117
+
118
+
119
+ def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
120
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
121
+ if mode == "fan_in":
122
+ denom = fan_in
123
+ elif mode == "fan_out":
124
+ denom = fan_out
125
+ elif mode == "fan_avg":
126
+ denom = (fan_in + fan_out) / 2
127
+
128
+ variance = scale / denom
129
+
130
+ if distribution == "truncated_normal":
131
+ # constant is stddev of standard normal truncated to (-2, 2)
132
+ trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
133
+ elif distribution == "normal":
134
+ with torch.no_grad():
135
+ tensor.normal_(std=math.sqrt(variance))
136
+ elif distribution == "uniform":
137
+ bound = math.sqrt(3 * variance)
138
+ with torch.no_grad():
139
+ tensor.uniform_(-bound, bound)
140
+ else:
141
+ raise ValueError(f"invalid distribution {distribution}")
142
+
143
+
144
+ def lecun_normal_(tensor):
145
+ variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
146
+
147
+
148
+ def default_flax_embed_init(tensor):
149
+ variance_scaling_(tensor, mode="fan_in", distribution="normal")
150
+
151
+
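# Usage sketch for the initializers defined above. The layer sizes are hypothetical and only
# illustrate the fan-in scaling; nothing here is part of the model definition itself.
probe = nn.Linear(1152, 768)
lecun_normal_(probe.weight)               # truncated normal with variance 1 / fan_in (fan_in = 1152)
embed = nn.Embedding(32000, 768)
default_flax_embed_init(embed.weight)     # plain normal with variance 1 / fan_in (Flax-style default)
trunc_normal_tf_(embed.weight, std=0.02)  # TF/JAX-style: truncate N(0, 1) to [-2, 2], then scale by std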
152
+ @dataclass
153
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
154
+ class SiglipVisionModelOutput(ModelOutput):
155
+ """
156
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
157
+
158
+ Args:
159
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
160
+ The image embeddings obtained by applying the projection layer to the pooler_output.
161
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
162
+ Sequence of hidden-states at the output of the last layer of the model.
163
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
164
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
165
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
166
+
167
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
168
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
169
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
170
+ sequence_length)`.
171
+
172
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
173
+ heads.
174
+ """
175
+
176
+ image_embeds: Optional[torch.FloatTensor] = None
177
+ last_hidden_state: torch.FloatTensor = None
178
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
179
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
180
+
181
+
182
+ @dataclass
183
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Siglip
184
+ class SiglipTextModelOutput(ModelOutput):
185
+ """
186
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
187
+
188
+ Args:
189
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
190
+ The text embeddings obtained by applying the projection layer to the pooler_output.
191
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
192
+ Sequence of hidden-states at the output of the last layer of the model.
193
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
194
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
195
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
196
+
197
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
198
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
199
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
200
+ sequence_length)`.
201
+
202
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
203
+ heads.
204
+ """
205
+
206
+ text_embeds: Optional[torch.FloatTensor] = None
207
+ last_hidden_state: torch.FloatTensor = None
208
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
209
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
210
+
211
+
212
+ @dataclass
213
+ # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Siglip
214
+ class SiglipOutput(ModelOutput):
215
+ """
216
+ Args:
217
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
218
+ Contrastive loss for image-text similarity.
219
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
220
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
221
+ similarity scores.
222
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
223
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
224
+ similarity scores.
225
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
226
+ The text embeddings obtained by applying the projection layer to the pooled output of [`SiglipTextModel`].
227
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
228
+ The image embeddings obtained by applying the projection layer to the pooled output of [`SiglipVisionModel`].
229
+ text_model_output (`BaseModelOutputWithPooling`):
230
+ The output of the [`SiglipTextModel`].
231
+ vision_model_output (`BaseModelOutputWithPooling`):
232
+ The output of the [`SiglipVisionModel`].
233
+ """
234
+
235
+ loss: Optional[torch.FloatTensor] = None
236
+ logits_per_image: torch.FloatTensor = None
237
+ logits_per_text: torch.FloatTensor = None
238
+ text_embeds: torch.FloatTensor = None
239
+ image_embeds: torch.FloatTensor = None
240
+ text_model_output: BaseModelOutputWithPooling = None
241
+ vision_model_output: BaseModelOutputWithPooling = None
242
+
243
+ def to_tuple(self) -> Tuple[Any]:
244
+ return tuple(
245
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
246
+ for k in self.keys()
247
+ )
248
+
249
+
250
+ class SiglipVisionEmbeddings(nn.Module):
251
+ def __init__(self, config: SiglipVisionConfig):
252
+ super().__init__()
253
+ self.config = config
254
+ self.embed_dim = config.hidden_size
255
+ self.image_size = config.image_size
256
+ self.patch_size = config.patch_size
257
+
258
+ self.patch_embedding = nn.Conv2d(
259
+ in_channels=config.num_channels,
260
+ out_channels=self.embed_dim,
261
+ kernel_size=self.patch_size,
262
+ stride=self.patch_size,
263
+ padding="valid",
264
+ )
265
+
266
+ self.num_patches = (self.image_size // self.patch_size) ** 2
267
+ self.num_positions = self.num_patches
268
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
269
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
270
+
271
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
272
+ """
273
+ This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
274
+ images. This method is also adapted to support torch.jit tracing and no class embeddings.
275
+
276
+ Adapted from:
277
+ - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
278
+ - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
279
+ """
280
+
281
+ num_patches = embeddings.shape[1]
282
+ num_positions = self.position_embedding.weight.shape[0]
283
+
284
+ # always interpolate when tracing to ensure the exported model works for dynamic input shapes
285
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
286
+ return self.position_embedding(self.position_ids)
287
+
288
+ patch_pos_embed = self.position_embedding.weight.unsqueeze(0)
289
+
290
+ dim = embeddings.shape[-1]
291
+
292
+ new_height = height // self.patch_size
293
+ new_width = width // self.patch_size
294
+
295
+ sqrt_num_positions = torch_int(num_positions**0.5)
296
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
297
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
298
+
299
+ patch_pos_embed = nn.functional.interpolate(
300
+ patch_pos_embed,
301
+ size=(new_height, new_width),
302
+ mode="bicubic",
303
+ align_corners=False,
304
+ )
305
+
306
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
307
+ return patch_pos_embed
308
+
309
+ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
310
+ _, _, height, width = pixel_values.shape
311
+ target_dtype = self.patch_embedding.weight.dtype
312
+ # import pdb;pdb.set_trace()
313
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
314
+ embeddings = patch_embeds.flatten(2).transpose(1, 2)
315
+
316
+ if interpolate_pos_encoding:
317
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
318
+ else:
319
+ embeddings = embeddings + self.position_embedding(self.position_ids)
320
+ return embeddings
321
+
322
+
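# Worked example for interpolate_pos_encoding above (the resolutions are hypothetical):
# a backbone pretrained at 224x224 with patch_size=16 stores a 14x14 = 196-entry position table,
# while a 512x512 input yields a 32x32 = 1024-patch grid, so the table is resized bicubically.
import torch.nn.functional as F

pos_table = torch.randn(1, 14 * 14, 768)                       # stand-in pretrained position embeddings
grid = pos_table.reshape(1, 14, 14, 768).permute(0, 3, 1, 2)   # (1, dim, 14, 14)
grid = F.interpolate(grid, size=(32, 32), mode="bicubic", align_corners=False)
resized = grid.permute(0, 2, 3, 1).view(1, -1, 768)            # (1, 1024, 768): one row per new patch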
323
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Siglip
324
+ class SiglipTextEmbeddings(nn.Module):
325
+ def __init__(self, config: SiglipTextConfig):
326
+ super().__init__()
327
+ embed_dim = config.hidden_size
328
+
329
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
330
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
331
+
332
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
333
+ self.register_buffer(
334
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
335
+ )
336
+
337
+ def forward(
338
+ self,
339
+ input_ids: Optional[torch.LongTensor] = None,
340
+ position_ids: Optional[torch.LongTensor] = None,
341
+ inputs_embeds: Optional[torch.FloatTensor] = None,
342
+ ) -> torch.Tensor:
343
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
344
+ max_position_embedding = self.position_embedding.weight.shape[0]
345
+
346
+ if seq_length > max_position_embedding:
347
+ raise ValueError(
348
+ f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
349
+ f"{seq_length} and max_position_embeddings: {max_position_embedding}"
350
+ )
351
+
352
+ if position_ids is None:
353
+ position_ids = self.position_ids[:, :seq_length]
354
+
355
+ if inputs_embeds is None:
356
+ inputs_embeds = self.token_embedding(input_ids)
357
+
358
+ position_embeddings = self.position_embedding(position_ids)
359
+ embeddings = inputs_embeds + position_embeddings
360
+
361
+ return embeddings
362
+
363
+
364
+ class SiglipAttention(nn.Module):
365
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
366
+
367
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
368
+ def __init__(self, config):
369
+ super().__init__()
370
+ self.config = config
371
+ self.embed_dim = config.hidden_size
372
+ self.num_heads = config.num_attention_heads
373
+ self.head_dim = self.embed_dim // self.num_heads
374
+ if self.head_dim * self.num_heads != self.embed_dim:
375
+ raise ValueError(
376
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
377
+ f" {self.num_heads})."
378
+ )
379
+ self.scale = self.head_dim**-0.5
380
+ self.dropout = config.attention_dropout
381
+
382
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
383
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
384
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
385
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
386
+
387
+ def forward(
388
+ self,
389
+ hidden_states: torch.Tensor,
390
+ attention_mask: Optional[torch.Tensor] = None,
391
+ output_attentions: Optional[bool] = False,
392
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
393
+ """Input shape: Batch x Time x Channel"""
394
+
395
+ batch_size, q_len, _ = hidden_states.size()
396
+
397
+ query_states = self.q_proj(hidden_states)
398
+ key_states = self.k_proj(hidden_states)
399
+ value_states = self.v_proj(hidden_states)
400
+
401
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
402
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
403
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
404
+
405
+ k_v_seq_len = key_states.shape[-2]
406
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
407
+
408
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
409
+ raise ValueError(
410
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
411
+ f" {attn_weights.size()}"
412
+ )
413
+
414
+ if attention_mask is not None:
415
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
416
+ raise ValueError(
417
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
418
+ )
419
+ attn_weights = attn_weights + attention_mask
420
+
421
+ # upcast attention to fp32
422
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
423
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
424
+ attn_output = torch.matmul(attn_weights, value_states)
425
+
426
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
427
+ raise ValueError(
428
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
429
+ f" {attn_output.size()}"
430
+ )
431
+
432
+ attn_output = attn_output.transpose(1, 2).contiguous()
433
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
434
+
435
+ attn_output = self.out_proj(attn_output)
436
+
437
+ return attn_output, attn_weights
438
+
439
+
440
+ class SiglipFlashAttention2(SiglipAttention):
441
+ """
442
+ SiglipAttention flash attention module. This module inherits from `SiglipAttention` as the weights of the module stay
443
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
444
+ flash attention and deal with padding tokens in case the input contains any of them.
445
+ """
446
+
447
+ is_causal = False
448
+
449
+ def __init__(self, *args, **kwargs):
450
+ super().__init__(*args, **kwargs)
451
+
452
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
453
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
454
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
455
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
456
+
457
+ # Adapted from transformers.models.llama.modeling_llama.LlamaFlashAttention2.forward
458
+ def forward(
459
+ self,
460
+ hidden_states: torch.Tensor,
461
+ attention_mask: Optional[torch.LongTensor] = None,
462
+ output_attentions: bool = False,
463
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
464
+ output_attentions = False
465
+
466
+ batch_size, q_len, _ = hidden_states.size()
467
+
468
+ query_states = self.q_proj(hidden_states)
469
+ key_states = self.k_proj(hidden_states)
470
+ value_states = self.v_proj(hidden_states)
471
+
472
+ # Flash attention requires the input to have the shape
473
+ # batch_size x seq_length x head_dim x hidden_dim
474
+ # therefore we just need to keep the original shape
475
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
476
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
477
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
478
+
479
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
480
+ # to be able to avoid many of these transpose/reshape/view.
481
+ query_states = query_states.transpose(1, 2)
482
+ key_states = key_states.transpose(1, 2)
483
+ value_states = value_states.transpose(1, 2)
484
+
485
+ dropout_rate = self.dropout if self.training else 0.0
486
+
487
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
488
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
489
+ # cast them back in the correct dtype just to be sure everything works as expected.
490
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
491
+ # in fp32.
492
+
493
+ input_dtype = query_states.dtype
494
+ if input_dtype == torch.float32:
495
+ if torch.is_autocast_enabled():
496
+ target_dtype = torch.get_autocast_gpu_dtype()
497
+ # Handle the case where the model is quantized
498
+ elif hasattr(self.config, "_pre_quantization_dtype"):
499
+ target_dtype = self.config._pre_quantization_dtype
500
+ else:
501
+ target_dtype = self.q_proj.weight.dtype
502
+
503
+ logger.warning_once(
504
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
505
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
506
+ f" {target_dtype}."
507
+ )
508
+
509
+ query_states = query_states.to(target_dtype)
510
+ key_states = key_states.to(target_dtype)
511
+ value_states = value_states.to(target_dtype)
512
+
513
+ attn_output = _flash_attention_forward(
514
+ query_states,
515
+ key_states,
516
+ value_states,
517
+ attention_mask,
518
+ q_len,
519
+ dropout=dropout_rate,
520
+ is_causal=self.is_causal,
521
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
522
+ )
523
+
524
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim).contiguous()
525
+ attn_output = self.out_proj(attn_output)
526
+
527
+ if not output_attentions:
528
+ attn_weights = None
529
+
530
+ return attn_output, attn_weights
531
+
532
+
533
+ class SiglipSdpaAttention(SiglipAttention):
534
+ """
535
+ Siglip attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
536
+ `SiglipAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
537
+ SDPA API.
538
+ """
539
+
540
+ is_causal = False
541
+
542
+ # Adapted from SiglipAttention.forward and transformers.models.llama.modeling_llama.LlamaSdpaAttention.forward
543
+ def forward(
544
+ self,
545
+ hidden_states: torch.Tensor,
546
+ attention_mask: Optional[torch.Tensor] = None,
547
+ output_attentions: Optional[bool] = False,
548
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
549
+ if output_attentions:
550
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
551
+ logger.warning_once(
552
+ "SiglipModel is using SiglipSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
553
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
554
+ )
555
+ return super().forward(
556
+ hidden_states=hidden_states,
557
+ attention_mask=attention_mask,
558
+ output_attentions=output_attentions,
559
+ )
560
+
561
+ batch_size, q_len, _ = hidden_states.size()
562
+
563
+ query_states = self.q_proj(hidden_states)
564
+ key_states = self.k_proj(hidden_states)
565
+ value_states = self.v_proj(hidden_states)
566
+
567
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
568
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
569
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
570
+
571
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
572
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
573
+ if query_states.device.type == "cuda" and attention_mask is not None:
574
+ query_states = query_states.contiguous()
575
+ key_states = key_states.contiguous()
576
+ value_states = value_states.contiguous()
577
+
578
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
579
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
580
+ is_causal = True if self.is_causal and q_len > 1 else False
581
+
582
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
583
+ query_states,
584
+ key_states,
585
+ value_states,
586
+ attn_mask=attention_mask,
587
+ dropout_p=self.dropout if self.training else 0.0,
588
+ is_causal=is_causal,
589
+ )
590
+
591
+ attn_output = attn_output.transpose(1, 2).contiguous()
592
+ attn_output = attn_output.view(batch_size, q_len, self.embed_dim)
593
+
594
+ attn_output = self.out_proj(attn_output)
595
+
596
+ return attn_output, None
597
+
598
+
599
+ SIGLIP_ATTENTION_CLASSES = {
600
+ "eager": SiglipAttention,
601
+ "flash_attention_2": SiglipFlashAttention2,
602
+ "sdpa": SiglipSdpaAttention,
603
+ }
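+ # `SiglipEncoderLayer` below picks one of these via SIGLIP_ATTENTION_CLASSES[config._attn_implementation],
+ # so loading with e.g. `attn_implementation="sdpa"` swaps every layer's self-attention to SiglipSdpaAttention.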
604
+
605
+
606
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
607
+ class SiglipMLP(nn.Module):
608
+ def __init__(self, config):
609
+ super().__init__()
610
+ self.config = config
611
+ self.activation_fn = ACT2FN[config.hidden_act]
612
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
613
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
614
+
615
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
616
+ hidden_states = self.fc1(hidden_states)
617
+ hidden_states = self.activation_fn(hidden_states)
618
+ hidden_states = self.fc2(hidden_states)
619
+ return hidden_states
620
+
621
+
622
+ class SiglipEncoderLayer(nn.Module):
623
+ def __init__(self, config: SiglipConfig):
624
+ super().__init__()
625
+ self.embed_dim = config.hidden_size
626
+ self.self_attn = SIGLIP_ATTENTION_CLASSES[config._attn_implementation](config=config)
627
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
628
+ self.mlp = SiglipMLP(config)
629
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
630
+
631
+ # Ignore copy
632
+ def forward(
633
+ self,
634
+ hidden_states: torch.Tensor,
635
+ attention_mask: torch.Tensor,
636
+ output_attentions: Optional[bool] = False,
637
+ ) -> Tuple[torch.FloatTensor]:
638
+ """
639
+ Args:
640
+ hidden_states (`torch.FloatTensor`):
641
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
642
+ attention_mask (`torch.FloatTensor`):
643
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
644
+ output_attentions (`bool`, *optional*, defaults to `False`):
645
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
646
+ returned tensors for more detail.
647
+ """
648
+ residual = hidden_states
649
+
650
+ hidden_states = self.layer_norm1(hidden_states)
651
+ hidden_states, attn_weights = self.self_attn(
652
+ hidden_states=hidden_states,
653
+ attention_mask=attention_mask,
654
+ output_attentions=output_attentions,
655
+ )
656
+ hidden_states = residual + hidden_states
657
+
658
+ residual = hidden_states
659
+ hidden_states = self.layer_norm2(hidden_states)
660
+ hidden_states = self.mlp(hidden_states)
661
+ hidden_states = residual + hidden_states
662
+
663
+ outputs = (hidden_states,)
664
+
665
+ if output_attentions:
666
+ outputs += (attn_weights,)
667
+
668
+ return outputs
669
+
670
+
671
+ class SiglipPreTrainedModel(PreTrainedModel):
672
+ """
673
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
674
+ models.
675
+ """
676
+
677
+ config_class = SiglipConfig
678
+ base_model_prefix = "siglip"
679
+ supports_gradient_checkpointing = True
680
+
681
+ _no_split_modules = [
682
+ "SiglipTextEmbeddings",
683
+ "SiglipEncoderLayer",
684
+ "SiglipVisionEmbeddings",
686
+ "SiglipMultiheadAttentionPoolingHead",
687
+ ]
688
+ _supports_flash_attn_2 = True
689
+ _supports_sdpa = True
690
+
691
+ def _init_weights(self, module):
692
+ """Initialize the weights"""
693
+ if isinstance(module, SiglipVisionEmbeddings):
694
+ width = (
695
+ self.config.vision_config.hidden_size
696
+ if isinstance(self.config, SiglipConfig)
697
+ else self.config.hidden_size
698
+ )
699
+ nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
700
+ elif isinstance(module, nn.Embedding):
701
+ default_flax_embed_init(module.weight)
702
+ elif isinstance(module, SiglipAttention):
703
+ nn.init.xavier_uniform_(module.q_proj.weight)
704
+ nn.init.xavier_uniform_(module.k_proj.weight)
705
+ nn.init.xavier_uniform_(module.v_proj.weight)
706
+ nn.init.xavier_uniform_(module.out_proj.weight)
707
+ nn.init.zeros_(module.q_proj.bias)
708
+ nn.init.zeros_(module.k_proj.bias)
709
+ nn.init.zeros_(module.v_proj.bias)
710
+ nn.init.zeros_(module.out_proj.bias)
711
+ elif isinstance(module, SiglipMLP):
712
+ nn.init.xavier_uniform_(module.fc1.weight)
713
+ nn.init.xavier_uniform_(module.fc2.weight)
714
+ nn.init.normal_(module.fc1.bias, std=1e-6)
715
+ nn.init.normal_(module.fc2.bias, std=1e-6)
716
+ elif isinstance(module, SiglipMultiheadAttentionPoolingHead):
717
+ nn.init.xavier_uniform_(module.probe.data)
718
+ nn.init.xavier_uniform_(module.attention.in_proj_weight.data)
719
+ nn.init.zeros_(module.attention.in_proj_bias.data)
720
+ elif isinstance(module, SiglipModel):
721
+ logit_scale_init = torch.log(torch.tensor(1.0))
722
+ module.logit_scale.data.fill_(logit_scale_init)
723
+ module.logit_bias.data.zero_()
724
+ elif isinstance(module, SiglipForImageClassification):
725
+ nn.init.normal_(
726
+ module.classifier.weight,
727
+ std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
728
+ )
729
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
730
+ lecun_normal_(module.weight)
731
+ if module.bias is not None:
732
+ nn.init.zeros_(module.bias)
733
+ elif isinstance(module, nn.LayerNorm):
734
+ module.bias.data.zero_()
735
+ module.weight.data.fill_(1.0)
736
+
737
+
738
+ SIGLIP_START_DOCSTRING = r"""
739
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
740
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
741
+ etc.)
742
+
743
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
744
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
745
+ and behavior.
746
+
747
+ Parameters:
748
+ config ([`SiglipConfig`]): Model configuration class with all the parameters of the model.
749
+ Initializing with a config file does not load the weights associated with the model, only the
750
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
751
+ """
752
+
753
+ SIGLIP_TEXT_INPUTS_DOCSTRING = r"""
754
+ Args:
755
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
756
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
757
+ it.
758
+
759
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
760
+ [`PreTrainedTokenizer.__call__`] for details.
761
+
762
+ [What are input IDs?](../glossary#input-ids)
763
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
764
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
765
+
766
+ - 1 for tokens that are **not masked**,
767
+ - 0 for tokens that are **masked**.
768
+
769
+ [What are attention masks?](../glossary#attention-mask)
770
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
771
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
772
+ config.max_position_embeddings - 1]`.
773
+
774
+ [What are position IDs?](../glossary#position-ids)
775
+ output_attentions (`bool`, *optional*):
776
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
777
+ tensors for more detail.
778
+ output_hidden_states (`bool`, *optional*):
779
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
780
+ more detail.
781
+ return_dict (`bool`, *optional*):
782
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
783
+ """
784
+
785
+ SIGLIP_VISION_INPUTS_DOCSTRING = r"""
786
+ Args:
787
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
788
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
789
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
790
+ output_attentions (`bool`, *optional*):
791
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
792
+ tensors for more detail.
793
+ output_hidden_states (`bool`, *optional*):
794
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
795
+ more detail.
796
+ interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
797
+ Whether to interpolate the pre-trained position encodings.
798
+ return_dict (`bool`, *optional*):
799
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
800
+ """
801
+
802
+ SIGLIP_INPUTS_DOCSTRING = r"""
803
+ Args:
804
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
805
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
806
+ it.
807
+
808
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
809
+ [`PreTrainedTokenizer.__call__`] for details.
810
+
811
+ [What are input IDs?](../glossary#input-ids)
812
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
813
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
814
+
815
+ - 1 for tokens that are **not masked**,
816
+ - 0 for tokens that are **masked**.
817
+
818
+ [What are attention masks?](../glossary#attention-mask)
819
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
820
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
821
+ config.max_position_embeddings - 1]`.
822
+
823
+ [What are position IDs?](../glossary#position-ids)
824
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
825
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
826
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
827
+ return_loss (`bool`, *optional*):
828
+ Whether or not to return the contrastive loss.
829
+ output_attentions (`bool`, *optional*):
830
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
831
+ tensors for more detail.
832
+ output_hidden_states (`bool`, *optional*):
833
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
834
+ more detail.
835
+ interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
836
+ Whether to interpolate the pre-trained position encodings.
837
+ return_dict (`bool`, *optional*):
838
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
839
+ """
840
+
841
+
842
+ # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->Siglip
843
+ class SiglipEncoder(nn.Module):
844
+ """
845
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
846
+ [`SiglipEncoderLayer`].
847
+
848
+ Args:
849
+ config: SiglipConfig
850
+ """
851
+
852
+ def __init__(self, config: SiglipConfig):
853
+ super().__init__()
854
+ self.config = config
855
+ self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
856
+ self.gradient_checkpointing = False
857
+
858
+ # Ignore copy
859
+ def forward(
860
+ self,
861
+ inputs_embeds,
862
+ attention_mask: Optional[torch.Tensor] = None,
863
+ output_attentions: Optional[bool] = None,
864
+ output_hidden_states: Optional[bool] = None,
865
+ return_dict: Optional[bool] = None,
866
+ ) -> Union[Tuple, BaseModelOutput]:
867
+ r"""
868
+ Args:
869
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
870
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
871
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
872
+ than the model's internal embedding lookup matrix.
873
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
874
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
875
+
876
+ - 1 for tokens that are **not masked**,
877
+ - 0 for tokens that are **masked**.
878
+
879
+ [What are attention masks?](../glossary#attention-mask)
880
+ output_attentions (`bool`, *optional*):
881
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
882
+ returned tensors for more detail.
883
+ output_hidden_states (`bool`, *optional*):
884
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
885
+ for more detail.
886
+ return_dict (`bool`, *optional*):
887
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
888
+ """
889
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
890
+ output_hidden_states = (
891
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
892
+ )
893
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
894
+
895
+ encoder_states = () if output_hidden_states else None
896
+ all_attentions = () if output_attentions else None
897
+
898
+ hidden_states = inputs_embeds
899
+ for encoder_layer in self.layers:
900
+ if output_hidden_states:
901
+ encoder_states = encoder_states + (hidden_states,)
902
+ if self.gradient_checkpointing and self.training:
903
+ layer_outputs = self._gradient_checkpointing_func(
904
+ encoder_layer.__call__,
905
+ hidden_states,
906
+ attention_mask,
907
+ output_attentions,
908
+ )
909
+ else:
910
+ layer_outputs = encoder_layer(
911
+ hidden_states,
912
+ attention_mask,
913
+ output_attentions=output_attentions,
914
+ )
915
+
916
+ hidden_states = layer_outputs[0]
917
+
918
+ if output_attentions:
919
+ all_attentions = all_attentions + (layer_outputs[1],)
920
+
921
+ if output_hidden_states:
922
+ encoder_states = encoder_states + (hidden_states,)
923
+
924
+ if not return_dict:
925
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
926
+ return BaseModelOutput(
927
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
928
+ )
929
+
930
+
931
+ class SiglipTextTransformer(nn.Module):
932
+ def __init__(self, config: SiglipTextConfig):
933
+ super().__init__()
934
+ self.config = config
935
+ embed_dim = config.hidden_size
936
+ self.embeddings = SiglipTextEmbeddings(config)
937
+ self.encoder = SiglipEncoder(config)
938
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
939
+
940
+ self.head = nn.Linear(embed_dim, embed_dim)
941
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
942
+
943
+ @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
944
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipTextConfig)
945
+ def forward(
946
+ self,
947
+ input_ids: Optional[torch.Tensor] = None,
948
+ attention_mask: Optional[torch.Tensor] = None,
949
+ position_ids: Optional[torch.Tensor] = None,
950
+ output_attentions: Optional[bool] = None,
951
+ output_hidden_states: Optional[bool] = None,
952
+ return_dict: Optional[bool] = None,
953
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
954
+ r"""
955
+ Returns:
956
+
957
+ """
958
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
959
+ output_hidden_states = (
960
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
961
+ )
962
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
963
+
964
+ if input_ids is None:
965
+ raise ValueError("You have to specify input_ids")
966
+
967
+ input_shape = input_ids.size()
968
+ input_ids = input_ids.view(-1, input_shape[-1])
969
+
970
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
971
+
972
+ # note: SigLIP's text model does not use a causal mask, unlike the original CLIP model.
973
+ # expand attention_mask
974
+ if attention_mask is not None and not self._use_flash_attention_2:
975
+ # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
976
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
977
+
978
+ encoder_outputs = self.encoder(
979
+ inputs_embeds=hidden_states,
980
+ attention_mask=attention_mask,
981
+ output_attentions=output_attentions,
982
+ output_hidden_states=output_hidden_states,
983
+ return_dict=return_dict,
984
+ )
985
+
986
+ last_hidden_state = encoder_outputs[0]
987
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
988
+
989
+ # Assuming "sticky" EOS tokenization, last token is always EOS.
990
+ pooled_output = last_hidden_state[:, -1, :]
991
+ pooled_output = self.head(pooled_output)
992
+
993
+ if not return_dict:
994
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
995
+
996
+ return BaseModelOutputWithPooling(
997
+ last_hidden_state=last_hidden_state,
998
+ pooler_output=pooled_output,
999
+ hidden_states=encoder_outputs.hidden_states,
1000
+ attentions=encoder_outputs.attentions,
1001
+ )
1002
+
1003
+
1004
+ @add_start_docstrings(
1005
+ """The text model from SigLIP without any head or projection on top.""",
1006
+ SIGLIP_START_DOCSTRING,
1007
+ )
1008
+ class SiglipTextModel(SiglipPreTrainedModel):
1009
+ config_class = SiglipTextConfig
1010
+
1011
+ def __init__(self, config: SiglipTextConfig):
1012
+ super().__init__(config)
1013
+ self.text_model = SiglipTextTransformer(config)
1014
+ # Initialize weights and apply final processing
1015
+ self.post_init()
1016
+
1017
+ def get_input_embeddings(self) -> nn.Module:
1018
+ return self.text_model.embeddings.token_embedding
1019
+
1020
+ def set_input_embeddings(self, value):
1021
+ self.text_model.embeddings.token_embedding = value
1022
+
1023
+ @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
1024
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipTextConfig)
1025
+ def forward(
1026
+ self,
1027
+ input_ids: Optional[torch.Tensor] = None,
1028
+ attention_mask: Optional[torch.Tensor] = None,
1029
+ position_ids: Optional[torch.Tensor] = None,
1030
+ output_attentions: Optional[bool] = None,
1031
+ output_hidden_states: Optional[bool] = None,
1032
+ return_dict: Optional[bool] = None,
1033
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1034
+ r"""
1035
+ Returns:
1036
+
1037
+ Examples:
1038
+
1039
+ ```python
1040
+ >>> from transformers import AutoTokenizer, SiglipTextModel
1041
+
1042
+ >>> model = SiglipTextModel.from_pretrained("google/siglip-base-patch16-224")
1043
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
1044
+
1045
+ >>> # important: make sure to set padding="max_length" as that's how the model was trained
1046
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
1047
+
1048
+ >>> outputs = model(**inputs)
1049
+ >>> last_hidden_state = outputs.last_hidden_state
1050
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
1051
+ ```"""
1052
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1053
+
1054
+ return self.text_model(
1055
+ input_ids=input_ids,
1056
+ attention_mask=attention_mask,
1057
+ position_ids=position_ids,
1058
+ output_attentions=output_attentions,
1059
+ output_hidden_states=output_hidden_states,
1060
+ return_dict=return_dict,
1061
+ )
1062
+
1063
+
1064
+ class SiglipVisionTransformer(nn.Module):
1065
+ def __init__(self, config: SiglipVisionConfig):
1066
+ super().__init__()
1067
+ self.config = config
1068
+ embed_dim = config.hidden_size
1069
+
1070
+ self.embeddings = SiglipVisionEmbeddings(config)
1071
+ self.encoder = SiglipEncoder(config)
1072
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1073
+ self.use_head = True if not hasattr(config, "vision_use_head") else config.vision_use_head
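+ # If the config sets vision_use_head=False, the pooling head is skipped and `pooler_output` is returned as None.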
1074
+ if self.use_head:
1075
+ self.head = SiglipMultiheadAttentionPoolingHead(config)
1076
+
1077
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1078
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
1079
+ def forward(
1080
+ self,
1081
+ pixel_values,
1082
+ output_attentions: Optional[bool] = None,
1083
+ output_hidden_states: Optional[bool] = None,
1084
+ return_dict: Optional[bool] = None,
1085
+ interpolate_pos_encoding: Optional[bool] = False,
1086
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1087
+ r"""
1088
+ Returns:
1089
+
1090
+ """
1091
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1092
+ output_hidden_states = (
1093
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1094
+ )
1095
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1096
+
1097
+ hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
1098
+
1099
+ encoder_outputs = self.encoder(
1100
+ inputs_embeds=hidden_states,
1101
+ output_attentions=output_attentions,
1102
+ output_hidden_states=output_hidden_states,
1103
+ return_dict=return_dict,
1104
+ )
1105
+
1106
+ last_hidden_state = encoder_outputs[0]
1107
+ last_hidden_state = self.post_layernorm(last_hidden_state)
1108
+
1109
+ pooler_output = self.head(last_hidden_state) if self.use_head else None
1110
+ if not return_dict:
1111
+ return (last_hidden_state, pooler_output) + encoder_outputs[1:]
1112
+
1113
+ return BaseModelOutputWithPooling(
1114
+ last_hidden_state=last_hidden_state,
1115
+ pooler_output=pooler_output,
1116
+ hidden_states=encoder_outputs.hidden_states,
1117
+ attentions=encoder_outputs.attentions,
1118
+ )
1119
+
1120
+
1121
+ class SiglipMultiheadAttentionPoolingHead(nn.Module):
1122
+ """Multihead Attention Pooling."""
1123
+
1124
+ def __init__(self, config: SiglipVisionConfig):
1125
+ super().__init__()
1126
+
1127
+ self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
1128
+ self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
1129
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1130
+ self.mlp = SiglipMLP(config)
1131
+
1132
+ def forward(self, hidden_state):
1133
+ batch_size = hidden_state.shape[0]
1134
+ probe = self.probe.repeat(batch_size, 1, 1)
1135
+
1136
+ hidden_state = self.attention(probe, hidden_state, hidden_state)[0]
1137
+
1138
+ residual = hidden_state
1139
+ hidden_state = self.layernorm(hidden_state)
1140
+ hidden_state = residual + self.mlp(hidden_state)
1141
+
1142
+ return hidden_state[:, 0]
1143
+
1144
+
1145
+ @add_start_docstrings(
1146
+ """The vision model from SigLIP without any head or projection on top.""",
1147
+ SIGLIP_START_DOCSTRING,
1148
+ )
1149
+ class SiglipVisionModel(SiglipPreTrainedModel):
1150
+ config_class = SiglipVisionConfig
1151
+ main_input_name = "pixel_values"
1152
+
1153
+ def __init__(self, config: SiglipVisionConfig):
1154
+ super().__init__(config)
1155
+
1156
+ self.vision_model = SiglipVisionTransformer(config)
1157
+
1158
+ # Initialize weights and apply final processing
1159
+ self.post_init()
1160
+
1161
+ def get_input_embeddings(self) -> nn.Module:
1162
+ return self.vision_model.embeddings.patch_embedding
1163
+
1164
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1165
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
1166
+ def forward(
1167
+ self,
1168
+ pixel_values,
1169
+ output_attentions: Optional[bool] = None,
1170
+ output_hidden_states: Optional[bool] = None,
1171
+ return_dict: Optional[bool] = None,
1172
+ interpolate_pos_encoding: bool = False,
1173
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1174
+ r"""
1175
+ Returns:
1176
+
1177
+ Examples:
1178
+
1179
+ ```python
1180
+ >>> from PIL import Image
1181
+ >>> import requests
1182
+ >>> from transformers import AutoProcessor, SiglipVisionModel
1183
+
1184
+ >>> model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
1185
+ >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1186
+
1187
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1188
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1189
+
1190
+ >>> inputs = processor(images=image, return_tensors="pt")
1191
+
1192
+ >>> outputs = model(**inputs)
1193
+ >>> last_hidden_state = outputs.last_hidden_state
1194
+ >>> pooled_output = outputs.pooler_output # pooled features
1195
+ ```"""
1196
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1197
+
1198
+ return self.vision_model(
1199
+ pixel_values=pixel_values,
1200
+ output_attentions=output_attentions,
1201
+ output_hidden_states=output_hidden_states,
1202
+ return_dict=return_dict,
1203
+ interpolate_pos_encoding=interpolate_pos_encoding,
1204
+ )
1205
+
1206
+
1207
+ @add_start_docstrings(SIGLIP_START_DOCSTRING)
1208
+ class SiglipModel(SiglipPreTrainedModel):
1209
+ config_class = SiglipConfig
1210
+
1211
+ def __init__(self, config: SiglipConfig):
1212
+ super().__init__(config)
1213
+
1214
+ if not isinstance(config.text_config, SiglipTextConfig):
1215
+ raise TypeError(
1216
+ "config.text_config is expected to be of type SiglipTextConfig but is of type"
1217
+ f" {type(config.text_config)}."
1218
+ )
1219
+
1220
+ if not isinstance(config.vision_config, SiglipVisionConfig):
1221
+ raise TypeError(
1222
+ "config.vision_config is expected to be of type SiglipVisionConfig but is of type"
1223
+ f" {type(config.vision_config)}."
1224
+ )
1225
+
1226
+ text_config = config.text_config
1227
+ vision_config = config.vision_config
1228
+
1229
+ # First, initialize the text and vision models with proper attention implementation
1230
+ text_model = SiglipTextModel._from_config(text_config)
1231
+ vision_model = SiglipVisionModel._from_config(vision_config)
1232
+
1233
+ # Second, get the text and vision submodules (for backward compatibility)
1234
+ self.text_model = text_model.text_model
1235
+ self.vision_model = vision_model.vision_model
1236
+
1237
+ self.logit_scale = nn.Parameter(torch.randn(1))
1238
+ self.logit_bias = nn.Parameter(torch.randn(1))
1239
+
1240
+ # Initialize weights and apply final processing
1241
+ self.post_init()
1242
+
1243
+ @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
1244
+ def get_text_features(
1245
+ self,
1246
+ input_ids: Optional[torch.Tensor] = None,
1247
+ attention_mask: Optional[torch.Tensor] = None,
1248
+ position_ids: Optional[torch.Tensor] = None,
1249
+ output_attentions: Optional[bool] = None,
1250
+ output_hidden_states: Optional[bool] = None,
1251
+ return_dict: Optional[bool] = None,
1252
+ ) -> torch.FloatTensor:
1253
+ r"""
1254
+ Returns:
1255
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1256
+ applying the projection layer to the pooled output of [`SiglipTextModel`].
1257
+
1258
+ Examples:
1259
+
1260
+ ```python
1261
+ >>> from transformers import AutoTokenizer, AutoModel
1262
+ >>> import torch
1263
+
1264
+ >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
1265
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
1266
+
1267
+ >>> # important: make sure to set padding="max_length" as that's how the model was trained
1268
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
1269
+ >>> with torch.no_grad():
1270
+ ... text_features = model.get_text_features(**inputs)
1271
+ ```"""
1272
+ # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components.
1273
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1274
+ output_hidden_states = (
1275
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1276
+ )
1277
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1278
+
1279
+ text_outputs = self.text_model(
1280
+ input_ids=input_ids,
1281
+ attention_mask=attention_mask,
1282
+ position_ids=position_ids,
1283
+ output_attentions=output_attentions,
1284
+ output_hidden_states=output_hidden_states,
1285
+ return_dict=return_dict,
1286
+ )
1287
+
1288
+ pooled_output = text_outputs[1]
1289
+
1290
+ return pooled_output
1291
+
1292
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1293
+ def get_image_features(
1294
+ self,
1295
+ pixel_values: Optional[torch.FloatTensor] = None,
1296
+ output_attentions: Optional[bool] = None,
1297
+ output_hidden_states: Optional[bool] = None,
1298
+ return_dict: Optional[bool] = None,
1299
+ interpolate_pos_encoding: bool = False,
1300
+ ) -> torch.FloatTensor:
1301
+ r"""
1302
+ Returns:
1303
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1304
+ applying the projection layer to the pooled output of [`SiglipVisionModel`].
1305
+
1306
+ Examples:
1307
+
1308
+ ```python
1309
+ >>> from PIL import Image
1310
+ >>> import requests
1311
+ >>> from transformers import AutoProcessor, AutoModel
1312
+ >>> import torch
1313
+
1314
+ >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
1315
+ >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1316
+
1317
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1318
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1319
+
1320
+ >>> inputs = processor(images=image, return_tensors="pt")
1321
+
1322
+ >>> with torch.no_grad():
1323
+ ... image_features = model.get_image_features(**inputs)
1324
+ ```"""
1325
+ # Use SiglipModel's config for some fields (if specified) instead of those of vision & text components.
1326
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1327
+ output_hidden_states = (
1328
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1329
+ )
1330
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1331
+
1332
+ vision_outputs = self.vision_model(
1333
+ pixel_values=pixel_values,
1334
+ output_attentions=output_attentions,
1335
+ output_hidden_states=output_hidden_states,
1336
+ return_dict=return_dict,
1337
+ interpolate_pos_encoding=interpolate_pos_encoding,
1338
+ )
1339
+
1340
+ pooled_output = vision_outputs[1]
1341
+
1342
+ return pooled_output
1343
+
1344
+ @add_start_docstrings_to_model_forward(SIGLIP_INPUTS_DOCSTRING)
1345
+ @replace_return_docstrings(output_type=SiglipOutput, config_class=SiglipConfig)
1346
+ def forward(
1347
+ self,
1348
+ input_ids: Optional[torch.LongTensor] = None,
1349
+ pixel_values: Optional[torch.FloatTensor] = None,
1350
+ attention_mask: Optional[torch.Tensor] = None,
1351
+ position_ids: Optional[torch.LongTensor] = None,
1352
+ return_loss: Optional[bool] = None,
1353
+ output_attentions: Optional[bool] = None,
1354
+ output_hidden_states: Optional[bool] = None,
1355
+ return_dict: Optional[bool] = None,
1356
+ interpolate_pos_encoding: bool = False,
1357
+ ) -> Union[Tuple, SiglipOutput]:
1358
+ r"""
1359
+ Returns:
1360
+
1361
+ Examples:
1362
+
1363
+ ```python
1364
+ >>> from PIL import Image
1365
+ >>> import requests
1366
+ >>> from transformers import AutoProcessor, AutoModel
1367
+ >>> import torch
1368
+
1369
+ >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
1370
+ >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1371
+
1372
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1373
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1374
+
1375
+ >>> texts = ["a photo of 2 cats", "a photo of 2 dogs"]
1376
+ >>> # important: we pass `padding=max_length` since the model was trained with this
1377
+ >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")
1378
+
1379
+ >>> with torch.no_grad():
1380
+ ... outputs = model(**inputs)
1381
+
1382
+ >>> logits_per_image = outputs.logits_per_image
1383
+ >>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
1384
+ >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
1385
+ 31.9% that image 0 is 'a photo of 2 cats'
1386
+ ```"""
1387
+ # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components.
1388
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1389
+ output_hidden_states = (
1390
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1391
+ )
1392
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1393
+
1394
+ vision_outputs = self.vision_model(
1395
+ pixel_values=pixel_values,
1396
+ output_attentions=output_attentions,
1397
+ output_hidden_states=output_hidden_states,
1398
+ return_dict=return_dict,
1399
+ interpolate_pos_encoding=interpolate_pos_encoding,
1400
+ )
1401
+
1402
+ text_outputs = self.text_model(
1403
+ input_ids=input_ids,
1404
+ attention_mask=attention_mask,
1405
+ position_ids=position_ids,
1406
+ output_attentions=output_attentions,
1407
+ output_hidden_states=output_hidden_states,
1408
+ return_dict=return_dict,
1409
+ )
1410
+
1411
+ image_embeds = vision_outputs[1]
1412
+ text_embeds = text_outputs[1]
1413
+
1414
+ # normalized features
1415
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1416
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1417
+
1418
+ # cosine similarity as logits
1419
+ logits_per_text = (
1420
+ torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device)) * self.logit_scale.exp()
1421
+ + self.logit_bias
1422
+ )
1423
+ logits_per_image = logits_per_text.t()
1424
+
1425
+ loss = None
1426
+ if return_loss:
1427
+ # Adapted from https://github.com/google-research/big_vision/blob/01edb81a4716f93a48be43b3a4af14e29cdb3a7f/big_vision/trainers/proj/image_text/siglip.py#L287
1428
+ eye = torch.eye(logits_per_text.size(0), device=logits_per_text.device)
1429
+ m1_diag1 = -torch.ones_like(logits_per_text) + 2 * eye
1430
+ loglik = torch.nn.functional.logsigmoid(m1_diag1 * logits_per_text)
1431
+ nll = -torch.sum(loglik, dim=-1)
1432
+ loss = nll.mean()
1433
+
1434
+ if not return_dict:
1435
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1436
+ return ((loss,) + output) if loss is not None else output
1437
+
1438
+ return SiglipOutput(
1439
+ loss=loss,
1440
+ logits_per_image=logits_per_image,
1441
+ logits_per_text=logits_per_text,
1442
+ text_embeds=text_embeds,
1443
+ image_embeds=image_embeds,
1444
+ text_model_output=text_outputs,
1445
+ vision_model_output=vision_outputs,
1446
+ )
1447
+
1448
+
1449
+ @add_start_docstrings(
1450
+ """
1451
+ SigLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of
1452
+ the patch tokens) e.g. for ImageNet.
1453
+ """,
1454
+ SIGLIP_START_DOCSTRING,
1455
+ )
1456
+ class SiglipForImageClassification(SiglipPreTrainedModel):
1457
+ main_input_name = "pixel_values"
1458
+
1459
+ def __init__(self, config: SiglipConfig) -> None:
1460
+ super().__init__(config)
1461
+
1462
+ self.num_labels = config.num_labels
1463
+
1464
+ # Create the vision model with proper attention
1465
+ # and take only vision_model submodule (for backward compatibility)
1466
+ vision_model = SiglipVisionModel._from_config(config.vision_config)
1467
+ self.vision_model = vision_model.vision_model
1468
+
1469
+ # Classifier head
1470
+ self.classifier = (
1471
+ nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
1472
+ )
1473
+
1474
+ # Initialize weights and apply final processing
1475
+ self.post_init()
1476
+
1477
+ @add_start_docstrings_to_model_forward(SIGLIP_INPUTS_DOCSTRING)
1478
+ @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
1479
+ def forward(
1480
+ self,
1481
+ pixel_values: Optional[torch.Tensor] = None,
1482
+ labels: Optional[torch.Tensor] = None,
1483
+ output_attentions: Optional[bool] = None,
1484
+ output_hidden_states: Optional[bool] = None,
1485
+ return_dict: Optional[bool] = None,
1486
+ interpolate_pos_encoding: bool = False,
1487
+ ) -> Union[tuple, ImageClassifierOutput]:
1488
+ r"""
1489
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1490
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
1491
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1492
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1493
+
1494
+ Returns:
1495
+
1496
+ Examples:
1497
+
1498
+ ```python
1499
+ >>> from transformers import AutoImageProcessor, SiglipForImageClassification
1500
+ >>> import torch
1501
+ >>> from PIL import Image
1502
+ >>> import requests
1503
+
1504
+ >>> torch.manual_seed(3) # doctest: +IGNORE_RESULT
1505
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1506
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1507
+
1508
+ >>> # note: we are loading a `SiglipModel` from the hub here,
1509
+ >>> # so the head will be randomly initialized, hence the predictions will be random if seed is not set above.
1510
+ >>> image_processor = AutoImageProcessor.from_pretrained("google/siglip-base-patch16-224")
1511
+ >>> model = SiglipForImageClassification.from_pretrained("google/siglip-base-patch16-224")
1512
+
1513
+ >>> inputs = image_processor(images=image, return_tensors="pt")
1514
+ >>> outputs = model(**inputs)
1515
+ >>> logits = outputs.logits
1516
+ >>> # model predicts one of the two classes
1517
+ >>> predicted_class_idx = logits.argmax(-1).item()
1518
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
1519
+ Predicted class: LABEL_1
1520
+ ```"""
1521
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1522
+ output_hidden_states = (
1523
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1524
+ )
1525
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1526
+
1527
+ outputs = self.vision_model(
1528
+ pixel_values,
1529
+ output_attentions=output_attentions,
1530
+ output_hidden_states=output_hidden_states,
1531
+ return_dict=return_dict,
1532
+ interpolate_pos_encoding=interpolate_pos_encoding,
1533
+ )
1534
+
1535
+ sequence_output = outputs[0]
1536
+
1537
+ # average pool the patch tokens
1538
+ sequence_output = torch.mean(sequence_output, dim=1)
1539
+ # apply classifier
1540
+ logits = self.classifier(sequence_output)
1541
+
1542
+ loss = None
1543
+ if labels is not None:
1544
+ # move labels to correct device to enable model parallelism
1545
+ labels = labels.to(logits.device)
1546
+ if self.config.problem_type is None:
1547
+ if self.num_labels == 1:
1548
+ self.config.problem_type = "regression"
1549
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1550
+ self.config.problem_type = "single_label_classification"
1551
+ else:
1552
+ self.config.problem_type = "multi_label_classification"
1553
+
1554
+ if self.config.problem_type == "regression":
1555
+ loss_fct = MSELoss()
1556
+ if self.num_labels == 1:
1557
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1558
+ else:
1559
+ loss = loss_fct(logits, labels)
1560
+ elif self.config.problem_type == "single_label_classification":
1561
+ loss_fct = CrossEntropyLoss()
1562
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1563
+ elif self.config.problem_type == "multi_label_classification":
1564
+ loss_fct = BCEWithLogitsLoss()
1565
+ loss = loss_fct(logits, labels)
1566
+
1567
+ if not return_dict:
1568
+ output = (logits,) + outputs[2:]
1569
+ return ((loss,) + output) if loss is not None else output
1570
+
1571
+ return ImageClassifierOutput(
1572
+ loss=loss,
1573
+ logits=logits,
1574
+ hidden_states=outputs.hidden_states,
1575
+ attentions=outputs.attentions,
1576
+ )
1577
+
1578
+
1579
+ __all__ = [
1580
+ "SiglipModel",
1581
+ "SiglipPreTrainedModel",
1582
+ "SiglipTextModel",
1583
+ "SiglipVisionModel",
1584
+ "SiglipForImageClassification",
1585
+ ]
preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "do_convert_rgb": true,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.5,
8
+ 0.5,
9
+ 0.5
10
+ ],
11
+ "image_processor_type": "SiglipImageProcessor",
12
+ "image_std": [
13
+ 0.5,
14
+ 0.5,
15
+ 0.5
16
+ ],
17
+ "processor_class": "SiglipProcessor",
18
+ "resample": 3,
19
+ "rescale_factor": 0.00392156862745098,
20
+ "size": {
21
+ "height": 384,
22
+ "width": 384
23
+ }
24
+ }
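As a rough illustration (an editorial sketch, not part of the uploaded files), the settings above amount to the following per-image preprocessing; in practice the bundled `SiglipImageProcessor` from `transformers` should be used instead:

```python
from PIL import Image
import numpy as np

def preprocess(image: Image.Image) -> np.ndarray:
    # do_convert_rgb + do_resize: 384x384 with bicubic resampling (resample=3)
    image = image.convert("RGB").resize((384, 384), resample=Image.BICUBIC)
    # do_rescale: multiply by rescale_factor = 1/255
    pixels = np.asarray(image, dtype=np.float32) * (1.0 / 255.0)
    # do_normalize: image_mean = image_std = [0.5, 0.5, 0.5] for every channel
    pixels = (pixels - 0.5) / 0.5
    # channels-first layout expected by the vision tower: (3, 384, 384)
    return pixels.transpose(2, 0, 1)
```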
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<|endofchunk|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ {
11
+ "content": "<s>",
12
+ "lstrip": false,
13
+ "normalized": false,
14
+ "rstrip": false,
15
+ "single_word": false
16
+ },
17
+ {
18
+ "content": "<|pad|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ ],
25
+ "bos_token": "<s>",
26
+ "eos_token": {
27
+ "content": "<|im_end|>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ },
33
+ "pad_token": "<|pad|>"
34
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da321834704c685bd2e638824b76f3beb841556558a6df5b8a43bf2ad31f7b94
3
+ size 11423209
tokenizer_config.json ADDED
@@ -0,0 +1,253 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "128245": {
6
+ "content": "<s>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151643": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151644": {
22
+ "content": "<|im_start|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151645": {
30
+ "content": "<|im_end|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151646": {
38
+ "content": "<|object_ref_start|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151647": {
46
+ "content": "<|object_ref_end|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151648": {
54
+ "content": "<|box_start|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151649": {
62
+ "content": "<|box_end|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151650": {
70
+ "content": "<|quad_start|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151651": {
78
+ "content": "<|quad_end|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151652": {
86
+ "content": "<|vision_start|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151653": {
94
+ "content": "<|vision_end|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151654": {
102
+ "content": "<|vision_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151655": {
110
+ "content": "<|image_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151656": {
118
+ "content": "<|video_pad|>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": true
124
+ },
125
+ "151657": {
126
+ "content": "<tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151658": {
134
+ "content": "</tool_call>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151659": {
142
+ "content": "<|fim_prefix|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151660": {
150
+ "content": "<|fim_middle|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151661": {
158
+ "content": "<|fim_suffix|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151662": {
166
+ "content": "<|fim_pad|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151663": {
174
+ "content": "<|repo_name|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151664": {
182
+ "content": "<|file_sep|>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151665": {
190
+ "content": "<tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151666": {
198
+ "content": "</tool_response>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151667": {
206
+ "content": "<think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ },
213
+ "151668": {
214
+ "content": "</think>",
215
+ "lstrip": false,
216
+ "normalized": false,
217
+ "rstrip": false,
218
+ "single_word": false,
219
+ "special": false
220
+ },
221
+ "151669": {
222
+ "content": "<|endofchunk|>",
223
+ "lstrip": false,
224
+ "normalized": false,
225
+ "rstrip": false,
226
+ "single_word": false,
227
+ "special": true
228
+ },
229
+ "151670": {
230
+ "content": "<|pad|>",
231
+ "lstrip": false,
232
+ "normalized": false,
233
+ "rstrip": false,
234
+ "single_word": false,
235
+ "special": true
236
+ }
237
+ },
238
+ "additional_special_tokens": [
239
+ "<|endofchunk|>",
240
+ "<s>",
241
+ "<|pad|>"
242
+ ],
243
+ "bos_token": "<s>",
244
+ "clean_up_tokenization_spaces": false,
245
+ "eos_token": "<|im_end|>",
246
+ "errors": "replace",
247
+ "extra_special_tokens": {},
248
+ "model_max_length": 131072,
249
+ "pad_token": "<|pad|>",
250
+ "split_special_tokens": false,
251
+ "tokenizer_class": "Qwen2Tokenizer",
252
+ "unk_token": null
253
+ }
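A quick sanity check of the tokenizer files above (an editorial sketch; `"path/to/this/repo"` is a placeholder for wherever these files are downloaded):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")

print(tok.bos_token, tok.eos_token, tok.pad_token)  # <s> <|im_end|> <|pad|>
print(tok.model_max_length)                         # 131072
print(tok.convert_tokens_to_ids("<|pad|>"))         # 151670
```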
vocab.json ADDED
The diff for this file is too large to render. See raw diff