Commit d2ab27b (verified) by Jackbrocp · 1 parent: 91f1273

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "</box>": 92552,
+   "</img>": 92545,
+   "</quad>": 92548,
+   "</ref>": 92550,
+   "<IMG_CONTEXT>": 92546,
+   "<box>": 92551,
+   "<img>": 92544,
+   "<quad>": 92547,
+   "<ref>": 92549
+ }
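
added_tokens.json registers the image and region special tokens (<img>, <IMG_CONTEXT>, <quad>, <ref>, <box> and their closing tags) on top of the base vocabulary. A minimal sketch of verifying that a tokenizer loaded from this folder maps them to the IDs above; the repo path and trust_remote_code usage are assumptions, not taken from this commit:

from transformers import AutoTokenizer

# Placeholder path: substitute the local folder or Hub id of this upload.
tokenizer = AutoTokenizer.from_pretrained('path/to/this/repo', trust_remote_code=True)
for token in ['<img>', '</img>', '<IMG_CONTEXT>', '<box>', '</box>']:
    # Expected to match added_tokens.json, e.g. '<img>' -> 92544.
    print(token, tokenizer.convert_tokens_to_ids(token))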
configuration_intern_vit.py ADDED
@@ -0,0 +1,119 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+ import os
+ from typing import Union
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternVisionConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
+     instantiate a vision encoder according to the specified arguments, defining the model architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         num_channels (`int`, *optional*, defaults to 3):
+             Number of color channels in the input images (e.g., 3 for RGB).
+         patch_size (`int`, *optional*, defaults to 14):
+             The size (resolution) of each patch.
+         image_size (`int`, *optional*, defaults to 224):
+             The size (resolution) of each image.
+         qkv_bias (`bool`, *optional*, defaults to `False`):
+             Whether to add a bias to the queries and values in the self-attention layers.
+         hidden_size (`int`, *optional*, defaults to 3200):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_attention_heads (`int`, *optional*, defaults to 25):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 12800):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         qk_normalization (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the queries and keys in the self-attention layers.
+         num_hidden_layers (`int`, *optional*, defaults to 48):
+             Number of hidden layers in the Transformer encoder.
+         use_flash_attn (`bool`, *optional*, defaults to `True`):
+             Whether to use the flash attention mechanism.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the layer normalization layers.
+         dropout (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         drop_path_rate (`float`, *optional*, defaults to 0.0):
+             Dropout rate for stochastic depth.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         initializer_factor (`float`, *optional*, defaults to 0.1):
+             A factor for layer scale.
+     """
+
+     model_type = 'intern_vit_6b'
+
+     def __init__(
+             self,
+             num_channels=3,
+             patch_size=14,
+             image_size=224,
+             qkv_bias=False,
+             hidden_size=3200,
+             num_attention_heads=25,
+             intermediate_size=12800,
+             qk_normalization=True,
+             num_hidden_layers=48,
+             use_flash_attn=True,
+             hidden_act='gelu',
+             norm_type='rms_norm',
+             layer_norm_eps=1e-6,
+             dropout=0.0,
+             drop_path_rate=0.0,
+             attention_dropout=0.0,
+             initializer_range=0.02,
+             initializer_factor=0.1,
+             **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.dropout = dropout
+         self.drop_path_rate = drop_path_rate
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_channels = num_channels
+         self.patch_size = patch_size
+         self.image_size = image_size
+         self.initializer_range = initializer_range
+         self.initializer_factor = initializer_factor
+         self.attention_dropout = attention_dropout
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+         self.norm_type = norm_type
+         self.qkv_bias = qkv_bias
+         self.qk_normalization = qk_normalization
+         self.use_flash_attn = use_flash_attn
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         if 'vision_config' in config_dict:
+             config_dict = config_dict['vision_config']
+
+         if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
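
The class above only stores hyperparameters; a minimal sketch of constructing it directly with the defaults defined in `__init__` (no checkpoint involved; the local import path is an assumption):

# Minimal sketch: build the vision config with its in-file defaults and override a couple of fields.
from configuration_intern_vit import InternVisionConfig  # assumes the file is importable locally

config = InternVisionConfig()  # hidden_size=3200, num_hidden_layers=48, patch_size=14 by default
config_small = InternVisionConfig(num_hidden_layers=24, use_flash_attn=False)
print(config.model_type, config.hidden_size, config_small.num_hidden_layers)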
configuration_internlm2.py ADDED
@@ -0,0 +1,150 @@
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on transformers/src/transformers/models/llama/configuration_llama.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM2 model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ # Modified from transformers.model.llama.configuration_llama.LlamaConfig
+ class InternLM2Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
+     an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a configuration similar to that of InternLM2-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented
+             by the `inputs_ids` passed when calling [`InternLM2Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. For more details check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something
+             large just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+     Example:
+
+     """
+     model_type = 'internlm2'
+     _auto_class = 'AutoConfig'
+
+     def __init__(  # pylint: disable=W0102
+             self,
+             vocab_size=103168,
+             hidden_size=4096,
+             intermediate_size=11008,
+             num_hidden_layers=32,
+             num_attention_heads=32,
+             num_key_value_heads=None,
+             hidden_act='silu',
+             max_position_embeddings=2048,
+             initializer_range=0.02,
+             rms_norm_eps=1e-6,
+             use_cache=True,
+             pad_token_id=0,
+             bos_token_id=1,
+             eos_token_id=2,
+             tie_word_embeddings=False,
+             bias=True,
+             rope_theta=10000,
+             rope_scaling=None,
+             attn_implementation='eager',
+             **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         self.attn_implementation = attn_implementation
+         if self.attn_implementation is None:
+             self.attn_implementation = 'eager'
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
+                 f'got {self.rope_scaling}'
+             )
+         rope_scaling_type = self.rope_scaling.get('type', None)
+         rope_scaling_factor = self.rope_scaling.get('factor', None)
+         if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
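
Note that `_rope_scaling_validation` runs inside `__init__`, so an invalid `rope_scaling` dict fails at construction time rather than at model load. A small sketch of an accepted and a rejected value; the local import path is an assumption:

from configuration_internlm2 import InternLM2Config  # assumes the file is importable locally

ok = InternLM2Config(rope_scaling={'type': 'dynamic', 'factor': 2.0})  # accepted: type in ['linear', 'dynamic'], factor >= 1.0
try:
    InternLM2Config(rope_scaling={'type': 'yarn', 'factor': 2.0})      # rejected: 'yarn' is not a supported type here
except ValueError as err:
    print(err)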
configuration_internvl_chat.py ADDED
@@ -0,0 +1,96 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ import copy
+
+ from transformers import AutoConfig, LlamaConfig
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ from .configuration_intern_vit import InternVisionConfig
+ from .configuration_internlm2 import InternLM2Config
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternVLChatConfig(PretrainedConfig):
+     model_type = 'internvl_chat'
+     is_composition = True
+
+     def __init__(
+             self,
+             vision_config=None,
+             llm_config=None,
+             use_backbone_lora=0,
+             use_llm_lora=0,
+             select_layer=-1,
+             force_image_size=None,
+             downsample_ratio=0.5,
+             template=None,
+             dynamic_image_size=False,
+             use_thumbnail=False,
+             ps_version='v1',
+             min_dynamic_patch=1,
+             max_dynamic_patch=6,
+             **kwargs):
+         super().__init__(**kwargs)
+
+         if vision_config is None:
+             vision_config = {}
+             logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
+
+         if llm_config is None:
+             llm_config = {}
+             logger.info('llm_config is None. Initializing the llm_config with default values (`LlamaConfig`).')
+
+         self.vision_config = InternVisionConfig(**vision_config)
+         if llm_config['architectures'][0] == 'LlamaForCausalLM':
+             self.llm_config = LlamaConfig(**llm_config)
+         elif llm_config['architectures'][0] == 'InternLM2ForCausalLM':
+             self.llm_config = InternLM2Config(**llm_config)
+         else:
+             raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
+         self.use_backbone_lora = use_backbone_lora
+         self.use_llm_lora = use_llm_lora
+         self.select_layer = select_layer
+         self.force_image_size = force_image_size
+         self.downsample_ratio = downsample_ratio
+         self.template = template
+         self.dynamic_image_size = dynamic_image_size
+         self.use_thumbnail = use_thumbnail
+         self.ps_version = ps_version  # pixel shuffle version
+         self.min_dynamic_patch = min_dynamic_patch
+         self.max_dynamic_patch = max_dynamic_patch
+
+         logger.info(f'vision_select_layer: {self.select_layer}')
+         logger.info(f'ps_version: {self.ps_version}')
+         logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
+         logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
+
+     def to_dict(self):
+         """
+         Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
+
+         Returns:
+             `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+         """
+         output = copy.deepcopy(self.__dict__)
+         output['vision_config'] = self.vision_config.to_dict()
+         output['llm_config'] = self.llm_config.to_dict()
+         output['model_type'] = self.__class__.model_type
+         output['use_backbone_lora'] = self.use_backbone_lora
+         output['use_llm_lora'] = self.use_llm_lora
+         output['select_layer'] = self.select_layer
+         output['force_image_size'] = self.force_image_size
+         output['downsample_ratio'] = self.downsample_ratio
+         output['template'] = self.template
+         output['dynamic_image_size'] = self.dynamic_image_size
+         output['use_thumbnail'] = self.use_thumbnail
+         output['ps_version'] = self.ps_version
+         output['min_dynamic_patch'] = self.min_dynamic_patch
+         output['max_dynamic_patch'] = self.max_dynamic_patch
+
+         return output
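
InternVLChatConfig dispatches on llm_config['architectures'][0], so the llm_config dict it receives (normally read from this repo's config.json) is expected to carry an architectures list; the empty-dict fallback above would otherwise raise a KeyError. A minimal sketch of composing the two sub-configs by hand; the import path and the field values are illustrative, not taken from this checkpoint's config.json:

from configuration_internvl_chat import InternVLChatConfig  # assumes the files are importable locally

config = InternVLChatConfig(
    vision_config={'hidden_size': 1024, 'num_hidden_layers': 24},                 # illustrative values
    llm_config={'architectures': ['InternLM2ForCausalLM'], 'hidden_size': 2048},  # selects InternLM2Config
    dynamic_image_size=True,
    max_dynamic_patch=12,
)
print(config.to_dict()['llm_config']['architectures'])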
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.39.3"
+ }
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ffa49c03299ed3c3fb1aa86e04f26b511f38047356473ad72d5d42ec2c3503f
+ size 4939944336
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5088b9dba4d87bd022e35ab00cdc40d2f15e4ad9c61e44375f523f2f331ce883
+ size 4915914584
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d761b20908afb4fdbdbe9c0f74ba2f533eb85755308e0cfc478156e3ba27a12d
+ size 4915914592
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4404e9def49d54a79a21cf21daa87245d4b4ae6612e78a5c770efc541f36eb0e
+ size 1379026920
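
The four model-0000N-of-00004.safetensors entries are Git LFS pointers; their recorded sizes sum to slightly more than the total_size declared in model.safetensors.index.json below, since each shard file also carries its own safetensors header. A minimal sketch of loading the sharded checkpoint, where from_pretrained resolves each tensor to its shard through the index's weight_map; the path and dtype choice are placeholders:

import torch
from transformers import AutoModel

# Placeholder path; substitute the local folder or Hub id of this upload.
model = AutoModel.from_pretrained('path/to/this/repo', torch_dtype=torch.bfloat16,
                                  trust_remote_code=True)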
model.safetensors.index.json ADDED
@@ -0,0 +1,580 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16150730752
4
+ },
5
+ "weight_map": {
6
+ "language_model.model.layers.0.attention.wo.weight": "model-00001-of-00004.safetensors",
7
+ "language_model.model.layers.0.attention.wqkv.weight": "model-00001-of-00004.safetensors",
8
+ "language_model.model.layers.0.attention_norm.weight": "model-00001-of-00004.safetensors",
9
+ "language_model.model.layers.0.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
10
+ "language_model.model.layers.0.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
11
+ "language_model.model.layers.0.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
12
+ "language_model.model.layers.0.ffn_norm.weight": "model-00001-of-00004.safetensors",
13
+ "language_model.model.layers.1.attention.wo.weight": "model-00001-of-00004.safetensors",
14
+ "language_model.model.layers.1.attention.wqkv.weight": "model-00001-of-00004.safetensors",
15
+ "language_model.model.layers.1.attention_norm.weight": "model-00001-of-00004.safetensors",
16
+ "language_model.model.layers.1.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
17
+ "language_model.model.layers.1.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
18
+ "language_model.model.layers.1.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
19
+ "language_model.model.layers.1.ffn_norm.weight": "model-00001-of-00004.safetensors",
20
+ "language_model.model.layers.10.attention.wo.weight": "model-00002-of-00004.safetensors",
21
+ "language_model.model.layers.10.attention.wqkv.weight": "model-00002-of-00004.safetensors",
22
+ "language_model.model.layers.10.attention_norm.weight": "model-00002-of-00004.safetensors",
23
+ "language_model.model.layers.10.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
24
+ "language_model.model.layers.10.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
25
+ "language_model.model.layers.10.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
26
+ "language_model.model.layers.10.ffn_norm.weight": "model-00002-of-00004.safetensors",
27
+ "language_model.model.layers.11.attention.wo.weight": "model-00002-of-00004.safetensors",
28
+ "language_model.model.layers.11.attention.wqkv.weight": "model-00002-of-00004.safetensors",
29
+ "language_model.model.layers.11.attention_norm.weight": "model-00002-of-00004.safetensors",
30
+ "language_model.model.layers.11.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
31
+ "language_model.model.layers.11.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
32
+ "language_model.model.layers.11.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
33
+ "language_model.model.layers.11.ffn_norm.weight": "model-00002-of-00004.safetensors",
34
+ "language_model.model.layers.12.attention.wo.weight": "model-00002-of-00004.safetensors",
35
+ "language_model.model.layers.12.attention.wqkv.weight": "model-00002-of-00004.safetensors",
36
+ "language_model.model.layers.12.attention_norm.weight": "model-00002-of-00004.safetensors",
37
+ "language_model.model.layers.12.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
38
+ "language_model.model.layers.12.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
39
+ "language_model.model.layers.12.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
40
+ "language_model.model.layers.12.ffn_norm.weight": "model-00002-of-00004.safetensors",
41
+ "language_model.model.layers.13.attention.wo.weight": "model-00002-of-00004.safetensors",
42
+ "language_model.model.layers.13.attention.wqkv.weight": "model-00002-of-00004.safetensors",
43
+ "language_model.model.layers.13.attention_norm.weight": "model-00002-of-00004.safetensors",
44
+ "language_model.model.layers.13.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
45
+ "language_model.model.layers.13.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
46
+ "language_model.model.layers.13.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
47
+ "language_model.model.layers.13.ffn_norm.weight": "model-00002-of-00004.safetensors",
48
+ "language_model.model.layers.14.attention.wo.weight": "model-00002-of-00004.safetensors",
49
+ "language_model.model.layers.14.attention.wqkv.weight": "model-00002-of-00004.safetensors",
50
+ "language_model.model.layers.14.attention_norm.weight": "model-00002-of-00004.safetensors",
51
+ "language_model.model.layers.14.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
52
+ "language_model.model.layers.14.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
53
+ "language_model.model.layers.14.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
54
+ "language_model.model.layers.14.ffn_norm.weight": "model-00002-of-00004.safetensors",
55
+ "language_model.model.layers.15.attention.wo.weight": "model-00002-of-00004.safetensors",
56
+ "language_model.model.layers.15.attention.wqkv.weight": "model-00002-of-00004.safetensors",
57
+ "language_model.model.layers.15.attention_norm.weight": "model-00002-of-00004.safetensors",
58
+ "language_model.model.layers.15.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
59
+ "language_model.model.layers.15.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
60
+ "language_model.model.layers.15.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
61
+ "language_model.model.layers.15.ffn_norm.weight": "model-00002-of-00004.safetensors",
62
+ "language_model.model.layers.16.attention.wo.weight": "model-00002-of-00004.safetensors",
63
+ "language_model.model.layers.16.attention.wqkv.weight": "model-00002-of-00004.safetensors",
64
+ "language_model.model.layers.16.attention_norm.weight": "model-00002-of-00004.safetensors",
65
+ "language_model.model.layers.16.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
66
+ "language_model.model.layers.16.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
67
+ "language_model.model.layers.16.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
68
+ "language_model.model.layers.16.ffn_norm.weight": "model-00002-of-00004.safetensors",
69
+ "language_model.model.layers.17.attention.wo.weight": "model-00002-of-00004.safetensors",
70
+ "language_model.model.layers.17.attention.wqkv.weight": "model-00002-of-00004.safetensors",
71
+ "language_model.model.layers.17.attention_norm.weight": "model-00002-of-00004.safetensors",
72
+ "language_model.model.layers.17.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
73
+ "language_model.model.layers.17.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
74
+ "language_model.model.layers.17.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
75
+ "language_model.model.layers.17.ffn_norm.weight": "model-00002-of-00004.safetensors",
76
+ "language_model.model.layers.18.attention.wo.weight": "model-00002-of-00004.safetensors",
77
+ "language_model.model.layers.18.attention.wqkv.weight": "model-00002-of-00004.safetensors",
78
+ "language_model.model.layers.18.attention_norm.weight": "model-00002-of-00004.safetensors",
79
+ "language_model.model.layers.18.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
80
+ "language_model.model.layers.18.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
81
+ "language_model.model.layers.18.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
82
+ "language_model.model.layers.18.ffn_norm.weight": "model-00002-of-00004.safetensors",
83
+ "language_model.model.layers.19.attention.wo.weight": "model-00002-of-00004.safetensors",
84
+ "language_model.model.layers.19.attention.wqkv.weight": "model-00002-of-00004.safetensors",
85
+ "language_model.model.layers.19.attention_norm.weight": "model-00003-of-00004.safetensors",
86
+ "language_model.model.layers.19.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
87
+ "language_model.model.layers.19.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
88
+ "language_model.model.layers.19.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
89
+ "language_model.model.layers.19.ffn_norm.weight": "model-00003-of-00004.safetensors",
90
+ "language_model.model.layers.2.attention.wo.weight": "model-00001-of-00004.safetensors",
91
+ "language_model.model.layers.2.attention.wqkv.weight": "model-00001-of-00004.safetensors",
92
+ "language_model.model.layers.2.attention_norm.weight": "model-00001-of-00004.safetensors",
93
+ "language_model.model.layers.2.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
94
+ "language_model.model.layers.2.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
95
+ "language_model.model.layers.2.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
96
+ "language_model.model.layers.2.ffn_norm.weight": "model-00001-of-00004.safetensors",
97
+ "language_model.model.layers.20.attention.wo.weight": "model-00003-of-00004.safetensors",
98
+ "language_model.model.layers.20.attention.wqkv.weight": "model-00003-of-00004.safetensors",
99
+ "language_model.model.layers.20.attention_norm.weight": "model-00003-of-00004.safetensors",
100
+ "language_model.model.layers.20.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
101
+ "language_model.model.layers.20.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
102
+ "language_model.model.layers.20.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
103
+ "language_model.model.layers.20.ffn_norm.weight": "model-00003-of-00004.safetensors",
104
+ "language_model.model.layers.21.attention.wo.weight": "model-00003-of-00004.safetensors",
105
+ "language_model.model.layers.21.attention.wqkv.weight": "model-00003-of-00004.safetensors",
106
+ "language_model.model.layers.21.attention_norm.weight": "model-00003-of-00004.safetensors",
107
+ "language_model.model.layers.21.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
108
+ "language_model.model.layers.21.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
109
+ "language_model.model.layers.21.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
110
+ "language_model.model.layers.21.ffn_norm.weight": "model-00003-of-00004.safetensors",
111
+ "language_model.model.layers.22.attention.wo.weight": "model-00003-of-00004.safetensors",
112
+ "language_model.model.layers.22.attention.wqkv.weight": "model-00003-of-00004.safetensors",
113
+ "language_model.model.layers.22.attention_norm.weight": "model-00003-of-00004.safetensors",
114
+ "language_model.model.layers.22.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
115
+ "language_model.model.layers.22.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
116
+ "language_model.model.layers.22.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
117
+ "language_model.model.layers.22.ffn_norm.weight": "model-00003-of-00004.safetensors",
118
+ "language_model.model.layers.23.attention.wo.weight": "model-00003-of-00004.safetensors",
119
+ "language_model.model.layers.23.attention.wqkv.weight": "model-00003-of-00004.safetensors",
120
+ "language_model.model.layers.23.attention_norm.weight": "model-00003-of-00004.safetensors",
121
+ "language_model.model.layers.23.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
122
+ "language_model.model.layers.23.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
123
+ "language_model.model.layers.23.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
124
+ "language_model.model.layers.23.ffn_norm.weight": "model-00003-of-00004.safetensors",
125
+ "language_model.model.layers.24.attention.wo.weight": "model-00003-of-00004.safetensors",
126
+ "language_model.model.layers.24.attention.wqkv.weight": "model-00003-of-00004.safetensors",
127
+ "language_model.model.layers.24.attention_norm.weight": "model-00003-of-00004.safetensors",
128
+ "language_model.model.layers.24.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
129
+ "language_model.model.layers.24.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
130
+ "language_model.model.layers.24.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
131
+ "language_model.model.layers.24.ffn_norm.weight": "model-00003-of-00004.safetensors",
132
+ "language_model.model.layers.25.attention.wo.weight": "model-00003-of-00004.safetensors",
133
+ "language_model.model.layers.25.attention.wqkv.weight": "model-00003-of-00004.safetensors",
134
+ "language_model.model.layers.25.attention_norm.weight": "model-00003-of-00004.safetensors",
135
+ "language_model.model.layers.25.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
136
+ "language_model.model.layers.25.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
137
+ "language_model.model.layers.25.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
138
+ "language_model.model.layers.25.ffn_norm.weight": "model-00003-of-00004.safetensors",
139
+ "language_model.model.layers.26.attention.wo.weight": "model-00003-of-00004.safetensors",
140
+ "language_model.model.layers.26.attention.wqkv.weight": "model-00003-of-00004.safetensors",
141
+ "language_model.model.layers.26.attention_norm.weight": "model-00003-of-00004.safetensors",
142
+ "language_model.model.layers.26.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
143
+ "language_model.model.layers.26.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
144
+ "language_model.model.layers.26.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
145
+ "language_model.model.layers.26.ffn_norm.weight": "model-00003-of-00004.safetensors",
146
+ "language_model.model.layers.27.attention.wo.weight": "model-00003-of-00004.safetensors",
147
+ "language_model.model.layers.27.attention.wqkv.weight": "model-00003-of-00004.safetensors",
148
+ "language_model.model.layers.27.attention_norm.weight": "model-00003-of-00004.safetensors",
149
+ "language_model.model.layers.27.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
150
+ "language_model.model.layers.27.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
151
+ "language_model.model.layers.27.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
152
+ "language_model.model.layers.27.ffn_norm.weight": "model-00003-of-00004.safetensors",
153
+ "language_model.model.layers.28.attention.wo.weight": "model-00003-of-00004.safetensors",
154
+ "language_model.model.layers.28.attention.wqkv.weight": "model-00003-of-00004.safetensors",
155
+ "language_model.model.layers.28.attention_norm.weight": "model-00003-of-00004.safetensors",
156
+ "language_model.model.layers.28.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
157
+ "language_model.model.layers.28.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
158
+ "language_model.model.layers.28.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
159
+ "language_model.model.layers.28.ffn_norm.weight": "model-00003-of-00004.safetensors",
160
+ "language_model.model.layers.29.attention.wo.weight": "model-00003-of-00004.safetensors",
161
+ "language_model.model.layers.29.attention.wqkv.weight": "model-00003-of-00004.safetensors",
162
+ "language_model.model.layers.29.attention_norm.weight": "model-00003-of-00004.safetensors",
163
+ "language_model.model.layers.29.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
164
+ "language_model.model.layers.29.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
165
+ "language_model.model.layers.29.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
166
+ "language_model.model.layers.29.ffn_norm.weight": "model-00003-of-00004.safetensors",
167
+ "language_model.model.layers.3.attention.wo.weight": "model-00001-of-00004.safetensors",
168
+ "language_model.model.layers.3.attention.wqkv.weight": "model-00001-of-00004.safetensors",
169
+ "language_model.model.layers.3.attention_norm.weight": "model-00001-of-00004.safetensors",
170
+ "language_model.model.layers.3.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
171
+ "language_model.model.layers.3.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
172
+ "language_model.model.layers.3.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
173
+ "language_model.model.layers.3.ffn_norm.weight": "model-00001-of-00004.safetensors",
174
+ "language_model.model.layers.30.attention.wo.weight": "model-00003-of-00004.safetensors",
175
+ "language_model.model.layers.30.attention.wqkv.weight": "model-00003-of-00004.safetensors",
176
+ "language_model.model.layers.30.attention_norm.weight": "model-00004-of-00004.safetensors",
177
+ "language_model.model.layers.30.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
178
+ "language_model.model.layers.30.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
179
+ "language_model.model.layers.30.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
180
+ "language_model.model.layers.30.ffn_norm.weight": "model-00004-of-00004.safetensors",
181
+ "language_model.model.layers.31.attention.wo.weight": "model-00004-of-00004.safetensors",
182
+ "language_model.model.layers.31.attention.wqkv.weight": "model-00004-of-00004.safetensors",
183
+ "language_model.model.layers.31.attention_norm.weight": "model-00004-of-00004.safetensors",
184
+ "language_model.model.layers.31.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
185
+ "language_model.model.layers.31.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
186
+ "language_model.model.layers.31.feed_forward.w3.weight": "model-00004-of-00004.safetensors",
187
+ "language_model.model.layers.31.ffn_norm.weight": "model-00004-of-00004.safetensors",
188
+ "language_model.model.layers.4.attention.wo.weight": "model-00001-of-00004.safetensors",
189
+ "language_model.model.layers.4.attention.wqkv.weight": "model-00001-of-00004.safetensors",
190
+ "language_model.model.layers.4.attention_norm.weight": "model-00001-of-00004.safetensors",
191
+ "language_model.model.layers.4.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
192
+ "language_model.model.layers.4.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
193
+ "language_model.model.layers.4.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
194
+ "language_model.model.layers.4.ffn_norm.weight": "model-00001-of-00004.safetensors",
195
+ "language_model.model.layers.5.attention.wo.weight": "model-00001-of-00004.safetensors",
196
+ "language_model.model.layers.5.attention.wqkv.weight": "model-00001-of-00004.safetensors",
197
+ "language_model.model.layers.5.attention_norm.weight": "model-00001-of-00004.safetensors",
198
+ "language_model.model.layers.5.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
199
+ "language_model.model.layers.5.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
200
+ "language_model.model.layers.5.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
201
+ "language_model.model.layers.5.ffn_norm.weight": "model-00001-of-00004.safetensors",
202
+ "language_model.model.layers.6.attention.wo.weight": "model-00001-of-00004.safetensors",
203
+ "language_model.model.layers.6.attention.wqkv.weight": "model-00001-of-00004.safetensors",
204
+ "language_model.model.layers.6.attention_norm.weight": "model-00001-of-00004.safetensors",
205
+ "language_model.model.layers.6.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
206
+ "language_model.model.layers.6.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
207
+ "language_model.model.layers.6.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
208
+ "language_model.model.layers.6.ffn_norm.weight": "model-00001-of-00004.safetensors",
209
+ "language_model.model.layers.7.attention.wo.weight": "model-00001-of-00004.safetensors",
210
+ "language_model.model.layers.7.attention.wqkv.weight": "model-00001-of-00004.safetensors",
211
+ "language_model.model.layers.7.attention_norm.weight": "model-00001-of-00004.safetensors",
212
+ "language_model.model.layers.7.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
213
+ "language_model.model.layers.7.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
214
+ "language_model.model.layers.7.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
215
+ "language_model.model.layers.7.ffn_norm.weight": "model-00001-of-00004.safetensors",
216
+ "language_model.model.layers.8.attention.wo.weight": "model-00001-of-00004.safetensors",
217
+ "language_model.model.layers.8.attention.wqkv.weight": "model-00001-of-00004.safetensors",
218
+ "language_model.model.layers.8.attention_norm.weight": "model-00002-of-00004.safetensors",
219
+ "language_model.model.layers.8.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
220
+ "language_model.model.layers.8.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
221
+ "language_model.model.layers.8.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
222
+ "language_model.model.layers.8.ffn_norm.weight": "model-00002-of-00004.safetensors",
223
+ "language_model.model.layers.9.attention.wo.weight": "model-00002-of-00004.safetensors",
224
+ "language_model.model.layers.9.attention.wqkv.weight": "model-00002-of-00004.safetensors",
225
+ "language_model.model.layers.9.attention_norm.weight": "model-00002-of-00004.safetensors",
226
+ "language_model.model.layers.9.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
227
+ "language_model.model.layers.9.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
228
+ "language_model.model.layers.9.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
229
+ "language_model.model.layers.9.ffn_norm.weight": "model-00002-of-00004.safetensors",
230
+ "language_model.model.norm.weight": "model-00004-of-00004.safetensors",
231
+ "language_model.model.tok_embeddings.weight": "model-00001-of-00004.safetensors",
232
+ "language_model.output.weight": "model-00004-of-00004.safetensors",
233
+ "mlp1.0.bias": "model-00004-of-00004.safetensors",
234
+ "mlp1.0.weight": "model-00004-of-00004.safetensors",
235
+ "mlp1.1.bias": "model-00004-of-00004.safetensors",
236
+ "mlp1.1.weight": "model-00004-of-00004.safetensors",
237
+ "mlp1.3.bias": "model-00004-of-00004.safetensors",
238
+ "mlp1.3.weight": "model-00004-of-00004.safetensors",
239
+ "vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
240
+ "vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
241
+ "vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
242
+ "vision_model.embeddings.position_embedding": "model-00001-of-00004.safetensors",
243
+ "vision_model.encoder.layers.0.attn.proj.bias": "model-00001-of-00004.safetensors",
244
+ "vision_model.encoder.layers.0.attn.proj.weight": "model-00001-of-00004.safetensors",
245
+ "vision_model.encoder.layers.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
246
+ "vision_model.encoder.layers.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
247
+ "vision_model.encoder.layers.0.ls1": "model-00001-of-00004.safetensors",
248
+ "vision_model.encoder.layers.0.ls2": "model-00001-of-00004.safetensors",
249
+ "vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
250
+ "vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
251
+ "vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
252
+ "vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
253
+ "vision_model.encoder.layers.0.norm1.bias": "model-00001-of-00004.safetensors",
254
+ "vision_model.encoder.layers.0.norm1.weight": "model-00001-of-00004.safetensors",
255
+ "vision_model.encoder.layers.0.norm2.bias": "model-00001-of-00004.safetensors",
256
+ "vision_model.encoder.layers.0.norm2.weight": "model-00001-of-00004.safetensors",
257
+ "vision_model.encoder.layers.1.attn.proj.bias": "model-00001-of-00004.safetensors",
258
+ "vision_model.encoder.layers.1.attn.proj.weight": "model-00001-of-00004.safetensors",
259
+ "vision_model.encoder.layers.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
260
+ "vision_model.encoder.layers.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
261
+ "vision_model.encoder.layers.1.ls1": "model-00001-of-00004.safetensors",
262
+ "vision_model.encoder.layers.1.ls2": "model-00001-of-00004.safetensors",
263
+ "vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
264
+ "vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
265
+ "vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
266
+ "vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
267
+ "vision_model.encoder.layers.1.norm1.bias": "model-00001-of-00004.safetensors",
268
+ "vision_model.encoder.layers.1.norm1.weight": "model-00001-of-00004.safetensors",
269
+ "vision_model.encoder.layers.1.norm2.bias": "model-00001-of-00004.safetensors",
270
+ "vision_model.encoder.layers.1.norm2.weight": "model-00001-of-00004.safetensors",
271
+ "vision_model.encoder.layers.10.attn.proj.bias": "model-00001-of-00004.safetensors",
272
+ "vision_model.encoder.layers.10.attn.proj.weight": "model-00001-of-00004.safetensors",
273
+ "vision_model.encoder.layers.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
274
+ "vision_model.encoder.layers.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
275
+ "vision_model.encoder.layers.10.ls1": "model-00001-of-00004.safetensors",
276
+ "vision_model.encoder.layers.10.ls2": "model-00001-of-00004.safetensors",
277
+ "vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
278
+ "vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
279
+ "vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
280
+ "vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
281
+ "vision_model.encoder.layers.10.norm1.bias": "model-00001-of-00004.safetensors",
282
+ "vision_model.encoder.layers.10.norm1.weight": "model-00001-of-00004.safetensors",
283
+ "vision_model.encoder.layers.10.norm2.bias": "model-00001-of-00004.safetensors",
284
+ "vision_model.encoder.layers.10.norm2.weight": "model-00001-of-00004.safetensors",
285
+ "vision_model.encoder.layers.11.attn.proj.bias": "model-00001-of-00004.safetensors",
286
+ "vision_model.encoder.layers.11.attn.proj.weight": "model-00001-of-00004.safetensors",
287
+ "vision_model.encoder.layers.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
288
+ "vision_model.encoder.layers.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
289
+ "vision_model.encoder.layers.11.ls1": "model-00001-of-00004.safetensors",
290
+ "vision_model.encoder.layers.11.ls2": "model-00001-of-00004.safetensors",
291
+ "vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
292
+ "vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
293
+ "vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
294
+ "vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
295
+ "vision_model.encoder.layers.11.norm1.bias": "model-00001-of-00004.safetensors",
296
+ "vision_model.encoder.layers.11.norm1.weight": "model-00001-of-00004.safetensors",
297
+ "vision_model.encoder.layers.11.norm2.bias": "model-00001-of-00004.safetensors",
298
+ "vision_model.encoder.layers.11.norm2.weight": "model-00001-of-00004.safetensors",
299
+ "vision_model.encoder.layers.12.attn.proj.bias": "model-00001-of-00004.safetensors",
300
+ "vision_model.encoder.layers.12.attn.proj.weight": "model-00001-of-00004.safetensors",
301
+ "vision_model.encoder.layers.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
302
+ "vision_model.encoder.layers.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
303
+ "vision_model.encoder.layers.12.ls1": "model-00001-of-00004.safetensors",
304
+ "vision_model.encoder.layers.12.ls2": "model-00001-of-00004.safetensors",
305
+ "vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
306
+ "vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
307
+ "vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
308
+ "vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
309
+ "vision_model.encoder.layers.12.norm1.bias": "model-00001-of-00004.safetensors",
310
+ "vision_model.encoder.layers.12.norm1.weight": "model-00001-of-00004.safetensors",
311
+ "vision_model.encoder.layers.12.norm2.bias": "model-00001-of-00004.safetensors",
312
+ "vision_model.encoder.layers.12.norm2.weight": "model-00001-of-00004.safetensors",
313
+ "vision_model.encoder.layers.13.attn.proj.bias": "model-00001-of-00004.safetensors",
314
+ "vision_model.encoder.layers.13.attn.proj.weight": "model-00001-of-00004.safetensors",
315
+ "vision_model.encoder.layers.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
316
+ "vision_model.encoder.layers.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
317
+ "vision_model.encoder.layers.13.ls1": "model-00001-of-00004.safetensors",
318
+ "vision_model.encoder.layers.13.ls2": "model-00001-of-00004.safetensors",
319
+ "vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
320
+ "vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
321
+ "vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
322
+ "vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
323
+ "vision_model.encoder.layers.13.norm1.bias": "model-00001-of-00004.safetensors",
324
+ "vision_model.encoder.layers.13.norm1.weight": "model-00001-of-00004.safetensors",
325
+ "vision_model.encoder.layers.13.norm2.bias": "model-00001-of-00004.safetensors",
326
+ "vision_model.encoder.layers.13.norm2.weight": "model-00001-of-00004.safetensors",
327
+ "vision_model.encoder.layers.14.attn.proj.bias": "model-00001-of-00004.safetensors",
328
+ "vision_model.encoder.layers.14.attn.proj.weight": "model-00001-of-00004.safetensors",
329
+ "vision_model.encoder.layers.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
330
+ "vision_model.encoder.layers.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
331
+ "vision_model.encoder.layers.14.ls1": "model-00001-of-00004.safetensors",
332
+ "vision_model.encoder.layers.14.ls2": "model-00001-of-00004.safetensors",
333
+ "vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
334
+ "vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
335
+ "vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
336
+ "vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
337
+ "vision_model.encoder.layers.14.norm1.bias": "model-00001-of-00004.safetensors",
338
+ "vision_model.encoder.layers.14.norm1.weight": "model-00001-of-00004.safetensors",
339
+ "vision_model.encoder.layers.14.norm2.bias": "model-00001-of-00004.safetensors",
340
+ "vision_model.encoder.layers.14.norm2.weight": "model-00001-of-00004.safetensors",
341
+ "vision_model.encoder.layers.15.attn.proj.bias": "model-00001-of-00004.safetensors",
342
+ "vision_model.encoder.layers.15.attn.proj.weight": "model-00001-of-00004.safetensors",
343
+ "vision_model.encoder.layers.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
344
+ "vision_model.encoder.layers.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
345
+ "vision_model.encoder.layers.15.ls1": "model-00001-of-00004.safetensors",
346
+ "vision_model.encoder.layers.15.ls2": "model-00001-of-00004.safetensors",
347
+ "vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
348
+ "vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
349
+ "vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
350
+ "vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
351
+ "vision_model.encoder.layers.15.norm1.bias": "model-00001-of-00004.safetensors",
352
+ "vision_model.encoder.layers.15.norm1.weight": "model-00001-of-00004.safetensors",
353
+ "vision_model.encoder.layers.15.norm2.bias": "model-00001-of-00004.safetensors",
354
+ "vision_model.encoder.layers.15.norm2.weight": "model-00001-of-00004.safetensors",
355
+ "vision_model.encoder.layers.16.attn.proj.bias": "model-00001-of-00004.safetensors",
356
+ "vision_model.encoder.layers.16.attn.proj.weight": "model-00001-of-00004.safetensors",
357
+ "vision_model.encoder.layers.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
358
+ "vision_model.encoder.layers.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
359
+ "vision_model.encoder.layers.16.ls1": "model-00001-of-00004.safetensors",
360
+ "vision_model.encoder.layers.16.ls2": "model-00001-of-00004.safetensors",
361
+ "vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
362
+ "vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
363
+ "vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
364
+ "vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
365
+ "vision_model.encoder.layers.16.norm1.bias": "model-00001-of-00004.safetensors",
366
+ "vision_model.encoder.layers.16.norm1.weight": "model-00001-of-00004.safetensors",
367
+ "vision_model.encoder.layers.16.norm2.bias": "model-00001-of-00004.safetensors",
368
+ "vision_model.encoder.layers.16.norm2.weight": "model-00001-of-00004.safetensors",
369
+ "vision_model.encoder.layers.17.attn.proj.bias": "model-00001-of-00004.safetensors",
370
+ "vision_model.encoder.layers.17.attn.proj.weight": "model-00001-of-00004.safetensors",
371
+ "vision_model.encoder.layers.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
372
+ "vision_model.encoder.layers.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
373
+ "vision_model.encoder.layers.17.ls1": "model-00001-of-00004.safetensors",
374
+ "vision_model.encoder.layers.17.ls2": "model-00001-of-00004.safetensors",
375
+ "vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
376
+ "vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
377
+ "vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
378
+ "vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
379
+ "vision_model.encoder.layers.17.norm1.bias": "model-00001-of-00004.safetensors",
380
+ "vision_model.encoder.layers.17.norm1.weight": "model-00001-of-00004.safetensors",
381
+ "vision_model.encoder.layers.17.norm2.bias": "model-00001-of-00004.safetensors",
382
+ "vision_model.encoder.layers.17.norm2.weight": "model-00001-of-00004.safetensors",
383
+ "vision_model.encoder.layers.18.attn.proj.bias": "model-00001-of-00004.safetensors",
384
+ "vision_model.encoder.layers.18.attn.proj.weight": "model-00001-of-00004.safetensors",
385
+ "vision_model.encoder.layers.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
386
+ "vision_model.encoder.layers.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
387
+ "vision_model.encoder.layers.18.ls1": "model-00001-of-00004.safetensors",
388
+ "vision_model.encoder.layers.18.ls2": "model-00001-of-00004.safetensors",
389
+ "vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
390
+ "vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
391
+ "vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
392
+ "vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
393
+ "vision_model.encoder.layers.18.norm1.bias": "model-00001-of-00004.safetensors",
394
+ "vision_model.encoder.layers.18.norm1.weight": "model-00001-of-00004.safetensors",
395
+ "vision_model.encoder.layers.18.norm2.bias": "model-00001-of-00004.safetensors",
396
+ "vision_model.encoder.layers.18.norm2.weight": "model-00001-of-00004.safetensors",
397
+ "vision_model.encoder.layers.19.attn.proj.bias": "model-00001-of-00004.safetensors",
398
+ "vision_model.encoder.layers.19.attn.proj.weight": "model-00001-of-00004.safetensors",
399
+ "vision_model.encoder.layers.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
400
+ "vision_model.encoder.layers.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
401
+ "vision_model.encoder.layers.19.ls1": "model-00001-of-00004.safetensors",
402
+ "vision_model.encoder.layers.19.ls2": "model-00001-of-00004.safetensors",
403
+ "vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
404
+ "vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
405
+ "vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
406
+ "vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
407
+ "vision_model.encoder.layers.19.norm1.bias": "model-00001-of-00004.safetensors",
408
+ "vision_model.encoder.layers.19.norm1.weight": "model-00001-of-00004.safetensors",
409
+ "vision_model.encoder.layers.19.norm2.bias": "model-00001-of-00004.safetensors",
410
+ "vision_model.encoder.layers.19.norm2.weight": "model-00001-of-00004.safetensors",
411
+ "vision_model.encoder.layers.2.attn.proj.bias": "model-00001-of-00004.safetensors",
412
+ "vision_model.encoder.layers.2.attn.proj.weight": "model-00001-of-00004.safetensors",
413
+ "vision_model.encoder.layers.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
414
+ "vision_model.encoder.layers.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
415
+ "vision_model.encoder.layers.2.ls1": "model-00001-of-00004.safetensors",
416
+ "vision_model.encoder.layers.2.ls2": "model-00001-of-00004.safetensors",
417
+ "vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
418
+ "vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
419
+ "vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
420
+ "vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
421
+ "vision_model.encoder.layers.2.norm1.bias": "model-00001-of-00004.safetensors",
422
+ "vision_model.encoder.layers.2.norm1.weight": "model-00001-of-00004.safetensors",
423
+ "vision_model.encoder.layers.2.norm2.bias": "model-00001-of-00004.safetensors",
424
+ "vision_model.encoder.layers.2.norm2.weight": "model-00001-of-00004.safetensors",
425
+ "vision_model.encoder.layers.20.attn.proj.bias": "model-00001-of-00004.safetensors",
426
+ "vision_model.encoder.layers.20.attn.proj.weight": "model-00001-of-00004.safetensors",
427
+ "vision_model.encoder.layers.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
428
+ "vision_model.encoder.layers.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
429
+ "vision_model.encoder.layers.20.ls1": "model-00001-of-00004.safetensors",
430
+ "vision_model.encoder.layers.20.ls2": "model-00001-of-00004.safetensors",
431
+ "vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
432
+ "vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
433
+ "vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
434
+ "vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
435
+ "vision_model.encoder.layers.20.norm1.bias": "model-00001-of-00004.safetensors",
436
+ "vision_model.encoder.layers.20.norm1.weight": "model-00001-of-00004.safetensors",
437
+ "vision_model.encoder.layers.20.norm2.bias": "model-00001-of-00004.safetensors",
438
+ "vision_model.encoder.layers.20.norm2.weight": "model-00001-of-00004.safetensors",
439
+ "vision_model.encoder.layers.21.attn.proj.bias": "model-00001-of-00004.safetensors",
440
+ "vision_model.encoder.layers.21.attn.proj.weight": "model-00001-of-00004.safetensors",
441
+ "vision_model.encoder.layers.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
442
+ "vision_model.encoder.layers.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
443
+ "vision_model.encoder.layers.21.ls1": "model-00001-of-00004.safetensors",
444
+ "vision_model.encoder.layers.21.ls2": "model-00001-of-00004.safetensors",
445
+ "vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
446
+ "vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
447
+ "vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
448
+ "vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
449
+ "vision_model.encoder.layers.21.norm1.bias": "model-00001-of-00004.safetensors",
450
+ "vision_model.encoder.layers.21.norm1.weight": "model-00001-of-00004.safetensors",
451
+ "vision_model.encoder.layers.21.norm2.bias": "model-00001-of-00004.safetensors",
452
+ "vision_model.encoder.layers.21.norm2.weight": "model-00001-of-00004.safetensors",
453
+ "vision_model.encoder.layers.22.attn.proj.bias": "model-00001-of-00004.safetensors",
454
+ "vision_model.encoder.layers.22.attn.proj.weight": "model-00001-of-00004.safetensors",
455
+ "vision_model.encoder.layers.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
456
+ "vision_model.encoder.layers.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
457
+ "vision_model.encoder.layers.22.ls1": "model-00001-of-00004.safetensors",
458
+ "vision_model.encoder.layers.22.ls2": "model-00001-of-00004.safetensors",
459
+ "vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
460
+ "vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
461
+ "vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
462
+ "vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
463
+ "vision_model.encoder.layers.22.norm1.bias": "model-00001-of-00004.safetensors",
464
+ "vision_model.encoder.layers.22.norm1.weight": "model-00001-of-00004.safetensors",
465
+ "vision_model.encoder.layers.22.norm2.bias": "model-00001-of-00004.safetensors",
466
+ "vision_model.encoder.layers.22.norm2.weight": "model-00001-of-00004.safetensors",
467
+ "vision_model.encoder.layers.23.attn.proj.bias": "model-00001-of-00004.safetensors",
468
+ "vision_model.encoder.layers.23.attn.proj.weight": "model-00001-of-00004.safetensors",
469
+ "vision_model.encoder.layers.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
470
+ "vision_model.encoder.layers.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
471
+ "vision_model.encoder.layers.23.ls1": "model-00001-of-00004.safetensors",
472
+ "vision_model.encoder.layers.23.ls2": "model-00001-of-00004.safetensors",
473
+ "vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
474
+ "vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
475
+ "vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
476
+ "vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
477
+ "vision_model.encoder.layers.23.norm1.bias": "model-00001-of-00004.safetensors",
478
+ "vision_model.encoder.layers.23.norm1.weight": "model-00001-of-00004.safetensors",
479
+ "vision_model.encoder.layers.23.norm2.bias": "model-00001-of-00004.safetensors",
480
+ "vision_model.encoder.layers.23.norm2.weight": "model-00001-of-00004.safetensors",
481
+ "vision_model.encoder.layers.3.attn.proj.bias": "model-00001-of-00004.safetensors",
482
+ "vision_model.encoder.layers.3.attn.proj.weight": "model-00001-of-00004.safetensors",
483
+ "vision_model.encoder.layers.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
484
+ "vision_model.encoder.layers.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
485
+ "vision_model.encoder.layers.3.ls1": "model-00001-of-00004.safetensors",
486
+ "vision_model.encoder.layers.3.ls2": "model-00001-of-00004.safetensors",
487
+ "vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
488
+ "vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
489
+ "vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
490
+ "vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
491
+ "vision_model.encoder.layers.3.norm1.bias": "model-00001-of-00004.safetensors",
492
+ "vision_model.encoder.layers.3.norm1.weight": "model-00001-of-00004.safetensors",
493
+ "vision_model.encoder.layers.3.norm2.bias": "model-00001-of-00004.safetensors",
494
+ "vision_model.encoder.layers.3.norm2.weight": "model-00001-of-00004.safetensors",
495
+ "vision_model.encoder.layers.4.attn.proj.bias": "model-00001-of-00004.safetensors",
496
+ "vision_model.encoder.layers.4.attn.proj.weight": "model-00001-of-00004.safetensors",
497
+ "vision_model.encoder.layers.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
498
+ "vision_model.encoder.layers.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
499
+ "vision_model.encoder.layers.4.ls1": "model-00001-of-00004.safetensors",
500
+ "vision_model.encoder.layers.4.ls2": "model-00001-of-00004.safetensors",
501
+ "vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
502
+ "vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
503
+ "vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
504
+ "vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
505
+ "vision_model.encoder.layers.4.norm1.bias": "model-00001-of-00004.safetensors",
506
+ "vision_model.encoder.layers.4.norm1.weight": "model-00001-of-00004.safetensors",
507
+ "vision_model.encoder.layers.4.norm2.bias": "model-00001-of-00004.safetensors",
508
+ "vision_model.encoder.layers.4.norm2.weight": "model-00001-of-00004.safetensors",
509
+ "vision_model.encoder.layers.5.attn.proj.bias": "model-00001-of-00004.safetensors",
510
+ "vision_model.encoder.layers.5.attn.proj.weight": "model-00001-of-00004.safetensors",
511
+ "vision_model.encoder.layers.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
512
+ "vision_model.encoder.layers.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
513
+ "vision_model.encoder.layers.5.ls1": "model-00001-of-00004.safetensors",
514
+ "vision_model.encoder.layers.5.ls2": "model-00001-of-00004.safetensors",
515
+ "vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
516
+ "vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
517
+ "vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
518
+ "vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
519
+ "vision_model.encoder.layers.5.norm1.bias": "model-00001-of-00004.safetensors",
520
+ "vision_model.encoder.layers.5.norm1.weight": "model-00001-of-00004.safetensors",
521
+ "vision_model.encoder.layers.5.norm2.bias": "model-00001-of-00004.safetensors",
522
+ "vision_model.encoder.layers.5.norm2.weight": "model-00001-of-00004.safetensors",
523
+ "vision_model.encoder.layers.6.attn.proj.bias": "model-00001-of-00004.safetensors",
524
+ "vision_model.encoder.layers.6.attn.proj.weight": "model-00001-of-00004.safetensors",
525
+ "vision_model.encoder.layers.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
526
+ "vision_model.encoder.layers.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
527
+ "vision_model.encoder.layers.6.ls1": "model-00001-of-00004.safetensors",
528
+ "vision_model.encoder.layers.6.ls2": "model-00001-of-00004.safetensors",
529
+ "vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
530
+ "vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
531
+ "vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
532
+ "vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
533
+ "vision_model.encoder.layers.6.norm1.bias": "model-00001-of-00004.safetensors",
534
+ "vision_model.encoder.layers.6.norm1.weight": "model-00001-of-00004.safetensors",
535
+ "vision_model.encoder.layers.6.norm2.bias": "model-00001-of-00004.safetensors",
536
+ "vision_model.encoder.layers.6.norm2.weight": "model-00001-of-00004.safetensors",
537
+ "vision_model.encoder.layers.7.attn.proj.bias": "model-00001-of-00004.safetensors",
538
+ "vision_model.encoder.layers.7.attn.proj.weight": "model-00001-of-00004.safetensors",
539
+ "vision_model.encoder.layers.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
540
+ "vision_model.encoder.layers.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
541
+ "vision_model.encoder.layers.7.ls1": "model-00001-of-00004.safetensors",
542
+ "vision_model.encoder.layers.7.ls2": "model-00001-of-00004.safetensors",
543
+ "vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
544
+ "vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
545
+ "vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
546
+ "vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
547
+ "vision_model.encoder.layers.7.norm1.bias": "model-00001-of-00004.safetensors",
548
+ "vision_model.encoder.layers.7.norm1.weight": "model-00001-of-00004.safetensors",
549
+ "vision_model.encoder.layers.7.norm2.bias": "model-00001-of-00004.safetensors",
550
+ "vision_model.encoder.layers.7.norm2.weight": "model-00001-of-00004.safetensors",
551
+ "vision_model.encoder.layers.8.attn.proj.bias": "model-00001-of-00004.safetensors",
552
+ "vision_model.encoder.layers.8.attn.proj.weight": "model-00001-of-00004.safetensors",
553
+ "vision_model.encoder.layers.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
554
+ "vision_model.encoder.layers.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
555
+ "vision_model.encoder.layers.8.ls1": "model-00001-of-00004.safetensors",
556
+ "vision_model.encoder.layers.8.ls2": "model-00001-of-00004.safetensors",
557
+ "vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
558
+ "vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
559
+ "vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
560
+ "vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
561
+ "vision_model.encoder.layers.8.norm1.bias": "model-00001-of-00004.safetensors",
562
+ "vision_model.encoder.layers.8.norm1.weight": "model-00001-of-00004.safetensors",
563
+ "vision_model.encoder.layers.8.norm2.bias": "model-00001-of-00004.safetensors",
564
+ "vision_model.encoder.layers.8.norm2.weight": "model-00001-of-00004.safetensors",
565
+ "vision_model.encoder.layers.9.attn.proj.bias": "model-00001-of-00004.safetensors",
566
+ "vision_model.encoder.layers.9.attn.proj.weight": "model-00001-of-00004.safetensors",
567
+ "vision_model.encoder.layers.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
568
+ "vision_model.encoder.layers.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
569
+ "vision_model.encoder.layers.9.ls1": "model-00001-of-00004.safetensors",
570
+ "vision_model.encoder.layers.9.ls2": "model-00001-of-00004.safetensors",
571
+ "vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
572
+ "vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
573
+ "vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
574
+ "vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
575
+ "vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00004.safetensors",
576
+ "vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00004.safetensors",
577
+ "vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00004.safetensors",
578
+ "vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00004.safetensors"
579
+ }
580
+ }
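The two closing braces above end the weight_map of model.safetensors.index.json, which maps every parameter name to one of the four safetensors shards. As a minimal sketch (a local checkout of this repository and the safetensors package are assumed; the parameter name is taken from the map above), a single tensor can be read without instantiating the whole model:

import json

from safetensors import safe_open

repo_dir = '.'  # hypothetical local checkout of this repository

with open(f'{repo_dir}/model.safetensors.index.json') as f:
    index = json.load(f)

# The index says which shard holds the parameter; open only that file.
name = 'vision_model.encoder.layers.20.mlp.fc1.weight'
shard = index['weight_map'][name]
with safe_open(f'{repo_dir}/{shard}', framework='pt', device='cpu') as sf:
    tensor = sf.get_tensor(name)
print(name, tuple(tensor.shape), tensor.dtype)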
modeling_internvl_chat.py ADDED
@@ -0,0 +1,346 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ import warnings
7
+ from typing import Any, List, Optional, Tuple, Union
8
+
9
+ import torch.utils.checkpoint
10
+ import transformers
11
+ from torch import nn
12
+ from torch.nn import CrossEntropyLoss
13
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
14
+ LlamaTokenizer)
15
+ from transformers.modeling_outputs import CausalLMOutputWithPast
16
+ from transformers.modeling_utils import PreTrainedModel
17
+ from transformers.utils import ModelOutput, logging
18
+
19
+ from .configuration_internvl_chat import InternVLChatConfig
20
+ from .conversation import get_conv_template
21
+ from .modeling_intern_vit import InternVisionModel
22
+ from .modeling_internlm2 import InternLM2ForCausalLM
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ def version_cmp(v1, v2, op='eq'):
28
+ import operator
29
+
30
+ from packaging import version
31
+ op_func = getattr(operator, op)
32
+ return op_func(version.parse(v1), version.parse(v2))
33
+
34
+
35
+ class InternVLChatModel(PreTrainedModel):
36
+ config_class = InternVLChatConfig
37
+ main_input_name = 'pixel_values'
38
+ _supports_flash_attn_2 = True
39
+ _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer']
40
+
41
+ def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None):
42
+ super().__init__(config)
43
+
44
+ assert version_cmp(transformers.__version__, '4.36.2', 'ge')
45
+ image_size = config.force_image_size or config.vision_config.image_size
46
+ patch_size = config.vision_config.patch_size
47
+ self.patch_size = patch_size
48
+ self.select_layer = config.select_layer
49
+ self.template = config.template
50
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
51
+ self.downsample_ratio = config.downsample_ratio
52
+ self.ps_version = config.ps_version
53
+
54
+ logger.info(f'num_image_token: {self.num_image_token}')
55
+ logger.info(f'ps_version: {self.ps_version}')
56
+ if vision_model is not None:
57
+ self.vision_model = vision_model
58
+ else:
59
+ self.vision_model = InternVisionModel(config.vision_config)
60
+ if language_model is not None:
61
+ self.language_model = language_model
62
+ else:
63
+ if config.llm_config.architectures[0] == 'LlamaForCausalLM':
64
+ self.language_model = LlamaForCausalLM(config.llm_config)
65
+ elif config.llm_config.architectures[0] == 'InternLM2ForCausalLM':
66
+ self.language_model = InternLM2ForCausalLM(config.llm_config)
67
+ else:
68
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
69
+
70
+ vit_hidden_size = config.vision_config.hidden_size
71
+ llm_hidden_size = config.llm_config.hidden_size
72
+
73
+ self.mlp1 = nn.Sequential(
74
+ nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
75
+ nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
76
+ nn.GELU(),
77
+ nn.Linear(llm_hidden_size, llm_hidden_size)
78
+ )
79
+
80
+ self.img_context_token_id = None
81
+ self.conv_template = get_conv_template(self.template)
82
+ self.system_message = self.conv_template.system_message
83
+
84
+ def forward(
85
+ self,
86
+ pixel_values: torch.FloatTensor,
87
+ input_ids: torch.LongTensor = None,
88
+ attention_mask: Optional[torch.Tensor] = None,
89
+ position_ids: Optional[torch.LongTensor] = None,
90
+ image_flags: Optional[torch.LongTensor] = None,
91
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
92
+ labels: Optional[torch.LongTensor] = None,
93
+ use_cache: Optional[bool] = None,
94
+ output_attentions: Optional[bool] = None,
95
+ output_hidden_states: Optional[bool] = None,
96
+ return_dict: Optional[bool] = None,
97
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
98
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
99
+
100
+ image_flags = image_flags.squeeze(-1)
101
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
102
+
103
+ vit_embeds = self.extract_feature(pixel_values)
104
+ vit_embeds = vit_embeds[image_flags == 1]
105
+ vit_batch_size = pixel_values.shape[0]
106
+
107
+ B, N, C = input_embeds.shape
108
+ input_embeds = input_embeds.reshape(B * N, C)
109
+
110
+ if torch.distributed.get_rank() == 0:
111
+ print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')
112
+
113
+ input_ids = input_ids.reshape(B * N)
114
+ selected = (input_ids == self.img_context_token_id)
115
+ try:
116
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
117
+ except Exception as e:
118
+ vit_embeds = vit_embeds.reshape(-1, C)
119
+ print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
120
+ f'vit_embeds.shape={vit_embeds.shape}')
121
+ n_token = selected.sum()
122
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
123
+
124
+ input_embeds = input_embeds.reshape(B, N, C)
125
+
126
+ outputs = self.language_model(
127
+ inputs_embeds=input_embeds,
128
+ attention_mask=attention_mask,
129
+ position_ids=position_ids,
130
+ past_key_values=past_key_values,
131
+ use_cache=use_cache,
132
+ output_attentions=output_attentions,
133
+ output_hidden_states=output_hidden_states,
134
+ return_dict=return_dict,
135
+ )
136
+ logits = outputs.logits
137
+
138
+ loss = None
139
+ if labels is not None:
140
+ # Shift so that tokens < n predict n
141
+ shift_logits = logits[..., :-1, :].contiguous()
142
+ shift_labels = labels[..., 1:].contiguous()
143
+ # Flatten the tokens
144
+ loss_fct = CrossEntropyLoss()
145
+ shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
146
+ shift_labels = shift_labels.view(-1)
147
+ # Enable model parallelism
148
+ shift_labels = shift_labels.to(shift_logits.device)
149
+ loss = loss_fct(shift_logits, shift_labels)
150
+
151
+ if not return_dict:
152
+ output = (logits,) + outputs[1:]
153
+ return (loss,) + output if loss is not None else output
154
+
155
+ return CausalLMOutputWithPast(
156
+ loss=loss,
157
+ logits=logits,
158
+ past_key_values=outputs.past_key_values,
159
+ hidden_states=outputs.hidden_states,
160
+ attentions=outputs.attentions,
161
+ )
162
+
163
+ def pixel_shuffle(self, x, scale_factor=0.5):
164
+ n, w, h, c = x.size()
165
+ # N, W, H, C --> N, W, H * scale, C // scale
166
+ x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
167
+ # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
168
+ x = x.permute(0, 2, 1, 3).contiguous()
169
+ # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
170
+ x = x.view(n, int(h * scale_factor), int(w * scale_factor),
171
+ int(c / (scale_factor * scale_factor)))
172
+ if self.ps_version == 'v1':
173
+ warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
174
+ 'which results in a transposed image.')
175
+ else:
176
+ x = x.permute(0, 2, 1, 3).contiguous()
177
+ return x
178
+
179
+ def extract_feature(self, pixel_values):
180
+ if self.select_layer == -1:
181
+ vit_embeds = self.vision_model(
182
+ pixel_values=pixel_values,
183
+ output_hidden_states=False,
184
+ return_dict=True).last_hidden_state
185
+ else:
186
+ vit_embeds = self.vision_model(
187
+ pixel_values=pixel_values,
188
+ output_hidden_states=True,
189
+ return_dict=True).hidden_states[self.select_layer]
190
+ vit_embeds = vit_embeds[:, 1:, :]
191
+
192
+ h = w = int(vit_embeds.shape[1] ** 0.5)
193
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
194
+ vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
195
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
196
+ vit_embeds = self.mlp1(vit_embeds)
197
+ return vit_embeds
198
+
199
+ def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
200
+ history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
201
+ IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
202
+ if history is not None or return_history:
203
+ print('Multi-turn chat is not yet supported in batch_chat.')
204
+ raise NotImplementedError
205
+
206
+ if image_counts is not None:
207
+ num_patches_list = image_counts
208
+ print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')
209
+
210
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
211
+ self.img_context_token_id = img_context_token_id
212
+
213
+ if verbose and pixel_values is not None:
214
+ image_bs = pixel_values.shape[0]
215
+ print(f'dynamic ViT batch size: {image_bs}')
216
+
217
+ queries = []
218
+ for idx, num_patches in enumerate(num_patches_list):
219
+ question = questions[idx]
220
+ if pixel_values is not None and '<image>' not in question:
221
+ question = '<image>\n' + question
222
+ template = get_conv_template(self.template)
223
+ template.system_message = self.system_message
224
+ template.append_message(template.roles[0], question)
225
+ template.append_message(template.roles[1], None)
226
+ query = template.get_prompt()
227
+
228
+ image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
229
+ query = query.replace('<image>', image_tokens, 1)
230
+ queries.append(query)
231
+
232
+ tokenizer.padding_side = 'left'
233
+ model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
234
+ input_ids = model_inputs['input_ids'].cuda()
235
+ attention_mask = model_inputs['attention_mask'].cuda()
236
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
237
+ generation_config['eos_token_id'] = eos_token_id
238
+ generation_output = self.generate(
239
+ pixel_values=pixel_values,
240
+ input_ids=input_ids,
241
+ attention_mask=attention_mask,
242
+ **generation_config
243
+ )
244
+ responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
245
+ responses = [response.split(template.sep)[0].strip() for response in responses]
246
+ return responses
247
+
248
+ def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
249
+ num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
250
+ verbose=False):
251
+
252
+ if history is None and pixel_values is not None and '<image>' not in question:
253
+ question = '<image>\n' + question
254
+
255
+ if num_patches_list is None:
256
+ num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
257
+ assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
258
+
259
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
260
+ self.img_context_token_id = img_context_token_id
261
+
262
+ template = get_conv_template(self.template)
263
+ template.system_message = self.system_message
264
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
265
+
266
+ history = [] if history is None else history
267
+ for (old_question, old_answer) in history:
268
+ template.append_message(template.roles[0], old_question)
269
+ template.append_message(template.roles[1], old_answer)
270
+ template.append_message(template.roles[0], question)
271
+ template.append_message(template.roles[1], None)
272
+ query = template.get_prompt()
273
+
274
+ if verbose and pixel_values is not None:
275
+ image_bs = pixel_values.shape[0]
276
+ print(f'dynamic ViT batch size: {image_bs}')
277
+
278
+ for num_patches in num_patches_list:
279
+ image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
280
+ query = query.replace('<image>', image_tokens, 1)
281
+
282
+ model_inputs = tokenizer(query, return_tensors='pt')
283
+ input_ids = model_inputs['input_ids'].cuda()
284
+ attention_mask = model_inputs['attention_mask'].cuda()
285
+ generation_config['eos_token_id'] = eos_token_id
286
+ generation_output = self.generate(
287
+ pixel_values=pixel_values,
288
+ input_ids=input_ids,
289
+ attention_mask=attention_mask,
290
+ **generation_config
291
+ )
292
+ response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
293
+ response = response.split(template.sep)[0].strip()
294
+ history.append((question, response))
295
+ if return_history:
296
+ return response, history
297
+ else:
298
+ query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
299
+ query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
300
+ if verbose:
301
+ print(query_to_print, response)
302
+ return response
303
+
304
+ @torch.no_grad()
305
+ def generate(
306
+ self,
307
+ pixel_values: Optional[torch.FloatTensor] = None,
308
+ input_ids: Optional[torch.FloatTensor] = None,
309
+ attention_mask: Optional[torch.LongTensor] = None,
310
+ visual_features: Optional[torch.FloatTensor] = None,
311
+ generation_config: Optional[GenerationConfig] = None,
312
+ output_hidden_states: Optional[bool] = None,
313
+ return_dict: Optional[bool] = None,
314
+ **generate_kwargs,
315
+ ) -> torch.LongTensor:
316
+
317
+ assert self.img_context_token_id is not None
318
+ if pixel_values is not None:
319
+ if visual_features is not None:
320
+ vit_embeds = visual_features
321
+ else:
322
+ vit_embeds = self.extract_feature(pixel_values)
323
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
324
+ B, N, C = input_embeds.shape
325
+ input_embeds = input_embeds.reshape(B * N, C)
326
+
327
+ input_ids = input_ids.reshape(B * N)
328
+ selected = (input_ids == self.img_context_token_id)
329
+ assert selected.sum() != 0
330
+ input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
331
+
332
+ input_embeds = input_embeds.reshape(B, N, C)
333
+ else:
334
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
335
+
336
+ outputs = self.language_model.generate(
337
+ inputs_embeds=input_embeds,
338
+ attention_mask=attention_mask,
339
+ generation_config=generation_config,
340
+ output_hidden_states=output_hidden_states,
341
+ return_dict=return_dict,
342
+ use_cache=True,
343
+ **generate_kwargs,
344
+ )
345
+
346
+ return outputs
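modeling_internvl_chat.py wires the InternViT encoder, the pixel-shuffle projector (mlp1) and the language model together, and exposes the chat(), batch_chat() and generate() helpers. Below is a minimal single-image inference sketch, assuming a local checkout of this repository, a CUDA device, transformers >= 4.36.2, an input resolution of 448x448 (the real value comes from this commit's config.json, not shown here), and that config.json registers the custom classes via auto_map:

import torch
from transformers import AutoModel, AutoTokenizer

path = '.'  # hypothetical local checkout of this repository
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16,
                                  trust_remote_code=True).eval().cuda()

# Placeholder pixel values; a real call would use the repo's image preprocessing
# to produce a (num_patches, 3, H, W) tensor of normalized image tiles.
pixel_values = torch.zeros(1, 3, 448, 448, dtype=torch.bfloat16).cuda()
generation_config = dict(max_new_tokens=256, do_sample=False)

response, history = model.chat(tokenizer, pixel_values, 'Describe the image.',
                               generation_config, history=None, return_history=True)
print(response)

generation_config is a plain dict because chat() injects eos_token_id into it and forwards the remaining keys to language_model.generate().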
special_tokens_map.json ADDED
@@ -0,0 +1,47 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|action_start|>",
6
+ "<|action_end|>",
7
+ "<|interpreter|>",
8
+ "<|plugin|>",
9
+ "<img>",
10
+ "</img>",
11
+ "<IMG_CONTEXT>",
12
+ "<quad>",
13
+ "</quad>",
14
+ "<ref>",
15
+ "</ref>",
16
+ "<box>",
17
+ "</box>"
18
+ ],
19
+ "bos_token": {
20
+ "content": "<s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ },
26
+ "eos_token": {
27
+ "content": "</s>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ },
33
+ "pad_token": {
34
+ "content": "</s>",
35
+ "lstrip": false,
36
+ "normalized": false,
37
+ "rstrip": false,
38
+ "single_word": false
39
+ },
40
+ "unk_token": {
41
+ "content": "<unk>",
42
+ "lstrip": false,
43
+ "normalized": false,
44
+ "rstrip": false,
45
+ "single_word": false
46
+ }
47
+ }
tokenization_internlm2.py ADDED
@@ -0,0 +1,235 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization classes for InternLM."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+ from transformers.tokenization_utils import PreTrainedTokenizer
24
+ from transformers.utils import logging
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {}
31
+
32
+
33
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
34
+ class InternLM2Tokenizer(PreTrainedTokenizer):
35
+ """
36
+ Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
37
+
38
+ Args:
39
+ vocab_file (`str`):
40
+ Path to the vocabulary file.
41
+ """
42
+
43
+ vocab_files_names = VOCAB_FILES_NAMES
44
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
45
+ model_input_names = ['input_ids', 'attention_mask']
46
+ _auto_class = 'AutoTokenizer'
47
+
48
+ def __init__(
49
+ self,
50
+ vocab_file,
51
+ unk_token='<unk>',
52
+ bos_token='<s>',
53
+ eos_token='</s>',
54
+ pad_token='</s>',
55
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
56
+ add_bos_token=True,
57
+ add_eos_token=False,
58
+ decode_with_prefix_space=False,
59
+ clean_up_tokenization_spaces=False,
60
+ **kwargs,
61
+ ):
62
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
63
+ self.vocab_file = vocab_file
64
+ self.add_bos_token = add_bos_token
65
+ self.add_eos_token = add_eos_token
66
+ self.decode_with_prefix_space = decode_with_prefix_space
67
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
68
+ self.sp_model.Load(vocab_file)
69
+ self._no_prefix_space_tokens = None
70
+ super().__init__(
71
+ bos_token=bos_token,
72
+ eos_token=eos_token,
73
+ unk_token=unk_token,
74
+ pad_token=pad_token,
75
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
76
+ **kwargs,
77
+ )
78
+
79
+ @property
80
+ def no_prefix_space_tokens(self):
81
+ if self._no_prefix_space_tokens is None:
82
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
83
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
84
+ return self._no_prefix_space_tokens
85
+
86
+ @property
87
+ def vocab_size(self):
88
+ """Returns vocab size"""
89
+ return self.sp_model.get_piece_size()
90
+
91
+ @property
92
+ def bos_token_id(self) -> Optional[int]:
93
+ return self.sp_model.bos_id()
94
+
95
+ @property
96
+ def eos_token_id(self) -> Optional[int]:
97
+ return self.sp_model.eos_id()
98
+
99
+ def get_vocab(self):
100
+ """Returns vocab as a dict"""
101
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
102
+ vocab.update(self.added_tokens_encoder)
103
+ return vocab
104
+
105
+ def _tokenize(self, text):
106
+ """Returns a tokenized string."""
107
+ return self.sp_model.encode(text, out_type=str)
108
+
109
+ def _convert_token_to_id(self, token):
110
+ """Converts a token (str) to an id using the vocab."""
111
+ return self.sp_model.piece_to_id(token)
112
+
113
+ def _convert_id_to_token(self, index):
114
+ """Converts an index (integer) to a token (str) using the vocab."""
115
+ token = self.sp_model.IdToPiece(index)
116
+ return token
117
+
118
+ def _maybe_add_prefix_space(self, tokens, decoded):
119
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
120
+ return ' ' + decoded
121
+ else:
122
+ return decoded
123
+
124
+ def convert_tokens_to_string(self, tokens):
125
+ """Converts a sequence of tokens (strings) into a single string."""
126
+ current_sub_tokens = []
127
+ out_string = ''
128
+ prev_is_special = False
129
+ for token in tokens:
130
+ # make sure that special tokens are not decoded using sentencepiece model
131
+ if token in self.all_special_tokens:
132
+ if not prev_is_special:
133
+ out_string += ' '
134
+ out_string += self.sp_model.decode(current_sub_tokens) + token
135
+ prev_is_special = True
136
+ current_sub_tokens = []
137
+ else:
138
+ current_sub_tokens.append(token)
139
+ prev_is_special = False
140
+ out_string += self.sp_model.decode(current_sub_tokens)
141
+ out_string = self.clean_up_tokenization(out_string)
142
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
143
+ return out_string[1:]
144
+
145
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
146
+ """
147
+ Save the vocabulary and special tokens file to a directory.
148
+
149
+ Args:
150
+ save_directory (`str`):
151
+ The directory in which to save the vocabulary.
152
+
153
+ Returns:
154
+ `Tuple(str)`: Paths to the files saved.
155
+ """
156
+ if not os.path.isdir(save_directory):
157
+ logger.error(f'Vocabulary path ({save_directory}) should be a directory')
158
+ return
159
+ out_vocab_file = os.path.join(
160
+ save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
161
+ )
162
+
163
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
164
+ copyfile(self.vocab_file, out_vocab_file)
165
+ elif not os.path.isfile(self.vocab_file):
166
+ with open(out_vocab_file, 'wb') as fi:
167
+ content_spiece_model = self.sp_model.serialized_model_proto()
168
+ fi.write(content_spiece_model)
169
+
170
+ return (out_vocab_file,)
171
+
172
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
173
+ if self.add_bos_token:
174
+ bos_token_ids = [self.bos_token_id]
175
+ else:
176
+ bos_token_ids = []
177
+
178
+ output = bos_token_ids + token_ids_0
179
+
180
+ if token_ids_1 is not None:
181
+ output = output + token_ids_1
182
+
183
+ if self.add_eos_token:
184
+ output = output + [self.eos_token_id]
185
+
186
+ return output
187
+
188
+ def get_special_tokens_mask(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
190
+ ) -> List[int]:
191
+ """
192
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
193
+ special tokens using the tokenizer `prepare_for_model` method.
194
+
195
+ Args:
196
+ token_ids_0 (`List[int]`):
197
+ List of IDs.
198
+ token_ids_1 (`List[int]`, *optional*):
199
+ Optional second list of IDs for sequence pairs.
200
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
201
+ Whether or not the token list is already formatted with special tokens for the model.
202
+
203
+ Returns:
204
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
205
+ """
206
+ if already_has_special_tokens:
207
+ return super().get_special_tokens_mask(
208
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
209
+ )
210
+
211
+ if token_ids_1 is None:
212
+ return [1] + ([0] * len(token_ids_0)) + [1]
213
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
214
+
215
+ def create_token_type_ids_from_sequences(
216
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
217
+ ) -> List[int]:
218
+ """
219
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does not make
220
+ use of token type ids, therefore a list of zeros is returned.
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of zeros.
230
+ """
231
+ eos = [self.eos_token_id]
232
+
233
+ if token_ids_1 is None:
234
+ return len(token_ids_0 + eos) * [0]
235
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
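InternLM2Tokenizer above is a SentencePiece-backed slow tokenizer whose build_inputs_with_special_tokens prepends <s> by default (add_bos_token=True) and appends no </s> (add_eos_token=False). A small sketch of that behaviour, assuming this commit is checked out locally and tokenizer.model has been fetched through git-lfs:

from tokenization_internlm2 import InternLM2Tokenizer

tok = InternLM2Tokenizer(vocab_file='tokenizer.model')
ids = tok('Hello, InternLM2!')['input_ids']
print(ids[0] == tok.bos_token_id)          # True: a single <s> is prepended
print(ids[-1] == tok.eos_token_id)         # False: no </s> is appended
print(tok.convert_ids_to_tokens(ids)[:5])  # leading pieces of the encoded text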
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
3
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,179 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<unk>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "92538": {
28
+ "content": "<|plugin|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "92539": {
36
+ "content": "<|interpreter|>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "92540": {
44
+ "content": "<|action_end|>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "92541": {
52
+ "content": "<|action_start|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "92542": {
60
+ "content": "<|im_end|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "92543": {
68
+ "content": "<|im_start|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "92544": {
76
+ "content": "<img>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "92545": {
84
+ "content": "</img>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "92546": {
92
+ "content": "<IMG_CONTEXT>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "92547": {
100
+ "content": "<quad>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "92548": {
108
+ "content": "</quad>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "92549": {
116
+ "content": "<ref>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "92550": {
124
+ "content": "</ref>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "92551": {
132
+ "content": "<box>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "92552": {
140
+ "content": "</box>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ }
147
+ },
148
+ "additional_special_tokens": [
149
+ "<|im_start|>",
150
+ "<|im_end|>",
151
+ "<|action_start|>",
152
+ "<|action_end|>",
153
+ "<|interpreter|>",
154
+ "<|plugin|>",
155
+ "<img>",
156
+ "</img>",
157
+ "<IMG_CONTEXT>",
158
+ "<quad>",
159
+ "</quad>",
160
+ "<ref>",
161
+ "</ref>",
162
+ "<box>",
163
+ "</box>"
164
+ ],
165
+ "auto_map": {
166
+ "AutoTokenizer": [
167
+ "tokenization_internlm2.InternLM2Tokenizer",
168
+ null
169
+ ]
170
+ },
171
+ "bos_token": "<s>",
172
+ "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
173
+ "clean_up_tokenization_spaces": false,
174
+ "eos_token": "</s>",
175
+ "model_max_length": 2048,
176
+ "pad_token": "</s>",
177
+ "tokenizer_class": "InternLM2Tokenizer",
178
+ "unk_token": "<unk>"
179
+ }
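tokenizer_config.json registers the slow tokenizer class via auto_map, declares the image/grounding special tokens added in this commit, and carries the ChatML-style chat_template used to build prompts. A sketch of rendering a prompt with it (local checkout of this repository assumed):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('.', trust_remote_code=True, use_fast=False)
messages = [{'role': 'user', 'content': 'What is in the image?<image>'}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <s><|im_start|>user
# What is in the image?<image><|im_end|>
# <|im_start|>assistant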