Commit 13f464b (verified) · committed by bunnycore · 1 Parent(s): 92af8fa

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
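The added attribute line is the standard rule Git LFS writes when a file is tracked. A minimal sketch of producing the same change locally, assuming the `git` CLI with the LFS extension is installed:

```python
# Sketch: reproduce the .gitattributes change above with Git LFS.
# Appends "tokenizer.json filter=lfs diff=lfs merge=lfs -text" to .gitattributes.
import subprocess

subprocess.run(["git", "lfs", "track", "tokenizer.json"], check=True)
```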
README.md ADDED
@@ -0,0 +1,25 @@
+ ---
+ license: apache-2.0
+ tags:
+ - merge
+ - mergekit
+ - lazymergekit
+ ---
+
+ # gemma3
+
+ gemma3 is a merge of the following models using [mergekit](https://github.com/cg123/mergekit):
+
+ ## 🧩 Configuration
+
+ ```yaml
+
+ base_model: unsloth/gemma-3-4b-it+bunnycore/gemma-3-4b-toxic-r1
+ dtype: bfloat16
+ merge_method: passthrough
+ models:
+ - model: unsloth/gemma-3-4b-it+bunnycore/gemma-3-4b-toxic-r1
+ tokenizer_source: unsloth/gemma-3-4b-it
+
+
+ ```
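For context, a minimal generation sketch with a recent transformers release (config.json below records a 4.50-series version). The repo id `bunnycore/gemma3` is an assumption based on the commit author and the README title; substitute the real one:

```python
# Sketch: load the merged model and generate text.
# "bunnycore/gemma3" is a hypothetical repo id, not confirmed by this commit.
import torch
from transformers import AutoProcessor, Gemma3ForConditionalGeneration

model_id = "bunnycore/gemma3"  # hypothetical
model = Gemma3ForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_id)

messages = [
    {"role": "user",
     "content": [{"type": "text", "text": "Explain model merging in one sentence."}]}
]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True,
    return_dict=True, return_tensors="pt",
).to(model.device)

out = model.generate(**inputs, max_new_tokens=64)
print(processor.decode(out[0][inputs["input_ids"].shape[-1]:],
                       skip_special_tokens=True))
```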
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
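added_tokens.json pins the image placeholder token to id 262144, the last slot of the 262145-entry vocabulary declared in config.json below. A quick check, assuming the same hypothetical repo id:

```python
# Sketch: confirm the added token resolves to the pinned id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bunnycore/gemma3")  # hypothetical repo id
assert tok.convert_tokens_to_ids("<image_soft_token>") == 262144
```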
config.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "architectures": [
+     "Gemma3ForConditionalGeneration"
+   ],
+   "boi_token_index": 255999,
+   "bos_token_id": 2,
+   "eoi_token_index": 256000,
+   "eos_token_id": 106,
+   "image_token_index": 262144,
+   "initializer_range": 0.02,
+   "mm_tokens_per_image": 256,
+   "model_type": "gemma3",
+   "pad_token_id": 0,
+   "text_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "attn_logit_softcapping": null,
+     "cache_implementation": "hybrid",
+     "final_logit_softcapping": null,
+     "head_dim": 256,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 2560,
+     "initializer_range": 0.02,
+     "intermediate_size": 10240,
+     "max_position_embeddings": 131072,
+     "model_type": "gemma3_text",
+     "num_attention_heads": 8,
+     "num_hidden_layers": 34,
+     "num_key_value_heads": 4,
+     "query_pre_attn_scalar": 256,
+     "rms_norm_eps": 1e-06,
+     "rope_local_base_freq": 10000.0,
+     "rope_scaling": {
+       "factor": 8.0,
+       "rope_type": "linear"
+     },
+     "rope_theta": 1000000.0,
+     "sliding_window": 1024,
+     "sliding_window_pattern": 6,
+     "torch_dtype": "bfloat16",
+     "use_cache": true,
+     "vocab_size": 262145
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.50.0.dev0",
+   "unsloth_fixed": true,
+   "vision_config": {
+     "attention_dropout": 0.0,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "image_size": 896,
+     "intermediate_size": 4304,
+     "layer_norm_eps": 1e-06,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 27,
+     "patch_size": 14,
+     "torch_dtype": "bfloat16",
+     "vision_use_head": false
+   }
+ }
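The substance lives in the nested sub-configs: `text_config` describes a 34-layer, 2560-wide Gemma 3 text stack with 8 query heads sharing 4 KV heads, and `vision_config` a 27-layer SigLIP tower taking 896×896 images at patch size 14. A sketch of reading these fields programmatically, same hypothetical repo id as above:

```python
# Sketch: inspect the nested sub-configs without downloading any weights.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("bunnycore/gemma3")  # hypothetical repo id
print(cfg.text_config.num_hidden_layers)    # 34
print(cfg.text_config.hidden_size)          # 2560
print(cfg.text_config.num_key_value_heads)  # 4 (GQA: 8 query heads share 4 KV heads)
print(cfg.vision_config.image_size // cfg.vision_config.patch_size)  # 64 patches per side
```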
mergekit_config.yml ADDED
@@ -0,0 +1,10 @@
+
+
+ base_model: unsloth/gemma-3-4b-it+bunnycore/gemma-3-4b-toxic-r1
+ dtype: bfloat16
+ merge_method: passthrough
+ models:
+ - model: unsloth/gemma-3-4b-it+bunnycore/gemma-3-4b-toxic-r1
+ tokenizer_source: unsloth/gemma-3-4b-it
+
+
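The `base+lora` form (`unsloth/gemma-3-4b-it+bunnycore/gemma-3-4b-toxic-r1`) is mergekit's syntax for applying a LoRA adapter onto a base model; the `passthrough` method then emits that result unchanged, with the tokenizer taken from `unsloth/gemma-3-4b-it`. A sketch of re-running the merge via mergekit's `mergekit-yaml` CLI (the output directory is an arbitrary assumption):

```python
# Sketch: re-run the merge from this config with the mergekit CLI.
import subprocess

subprocess.run(
    ["mergekit-yaml", "mergekit_config.yml", "./merged"],  # "./merged" is an assumed output dir
    check=True,
)
```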
model-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d56586c00d519e098a67c386005defee332badc31c2b4fd7d5da27334965a812
+ size 1342182552
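Each shard is committed as a Git LFS pointer: the spec version, the SHA-256 of the actual file, and its size in bytes. A sketch of verifying a downloaded shard against its pointer:

```python
# Sketch: verify a downloaded shard against the pointer's sha256 oid.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "d56586c00d519e098a67c386005defee332badc31c2b4fd7d5da27334965a812"
assert sha256_of("model-00001-of-00009.safetensors") == expected
```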
model-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8ad26abf0edaa492100101cd902a598943f45d7c0921ceccac8e580fad60119
+ size 996268536
model-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b9fdc20e38914ca35dd896bf99141477c10041e8ad3c423f9a5da31731cfd0b
+ size 996263328
model-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0e24704f4057d48416121b1680da5c0bba585dac9eb4fac0d4ec9a035e7dcf0
+ size 996279696
model-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96fad93cc2c40848978972d9a50058d8100a449c6e087cf4bae662e89301a5bd
+ size 975298072
model-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c1e3e18f15cf67baf2d4f58f2a556754e980c70472a2542180c13ae463c40fa
+ size 996263312
model-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ece2fe20fd972b5dab4bda6c869913bea8aa5e9d3ac600f73a600c9b53b9cc1d
+ size 996263288
model-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13e08e6ca51f14e3d91af4e8b3f2dd1febb0200bbd19a70035ed38af12521aad
+ size 996336888
model-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e842912315f49f47b42f45b79c246847d4222e58e2745a1d575b1289e719170
+ size 304798120
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.1.2"}, "weight_map": {"language_model.model.embed_tokens.weight": "model-00001-of-00009.safetensors", "language_model.model.layers.0.input_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.post_attention_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.post_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.pre_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.self_attn.k_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.self_attn.q_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.0.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.input_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.post_attention_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.post_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.pre_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.self_attn.k_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.self_attn.q_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.1.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.self_attn.k_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", 
"language_model.model.layers.10.self_attn.q_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.post_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.pre_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.self_attn.k_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.self_attn.q_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.post_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.pre_feedforward_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.self_attn.k_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.self_attn.q_norm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "language_model.model.layers.13.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.mlp.up_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.post_attention_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.post_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.pre_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.self_attn.k_norm.weight": "model-00003-of-00009.safetensors", 
"language_model.model.layers.13.self_attn.k_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.self_attn.o_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.self_attn.q_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.self_attn.q_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.13.self_attn.v_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.input_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.mlp.down_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.mlp.up_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.post_attention_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.self_attn.k_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.self_attn.k_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.self_attn.o_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.self_attn.q_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.self_attn.q_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.14.self_attn.v_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.input_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.mlp.down_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.mlp.up_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.post_attention_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.post_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.pre_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.self_attn.k_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.self_attn.k_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.self_attn.o_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.self_attn.q_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.self_attn.q_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.15.self_attn.v_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.input_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.mlp.down_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.mlp.up_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.post_attention_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.post_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", 
"language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.self_attn.k_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.self_attn.k_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.self_attn.o_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.self_attn.q_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.self_attn.q_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.16.self_attn.v_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.mlp.up_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.self_attn.k_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.self_attn.k_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.self_attn.o_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.self_attn.q_norm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.self_attn.q_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.17.self_attn.v_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "language_model.model.layers.18.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.post_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.self_attn.k_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.self_attn.q_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.18.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.input_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.mlp.down_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.mlp.up_proj.weight": "model-00004-of-00009.safetensors", 
"language_model.model.layers.19.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.post_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.self_attn.k_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.self_attn.q_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.19.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.input_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.mlp.down_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.post_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.self_attn.k_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.self_attn.q_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.2.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.input_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.mlp.down_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.post_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.pre_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.self_attn.k_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.self_attn.q_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.20.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.input_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.mlp.down_proj.weight": "model-00004-of-00009.safetensors", 
"language_model.model.layers.21.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.post_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.pre_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.self_attn.k_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.self_attn.q_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.21.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.input_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.mlp.down_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.post_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.pre_feedforward_layernorm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.self_attn.k_norm.weight": "model-00004-of-00009.safetensors", "language_model.model.layers.22.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.22.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.22.self_attn.q_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.22.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.22.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.input_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.self_attn.k_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.self_attn.q_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.23.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", 
"language_model.model.layers.24.input_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.self_attn.k_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.self_attn.q_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.24.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.input_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.post_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.pre_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.self_attn.k_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.self_attn.q_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.25.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.input_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.post_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.pre_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.self_attn.k_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.self_attn.q_norm.weight": "model-00005-of-00009.safetensors", 
"language_model.model.layers.26.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.26.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.input_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.post_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.pre_feedforward_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.self_attn.k_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.self_attn.q_norm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.27.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.28.input_layernorm.weight": "model-00005-of-00009.safetensors", "language_model.model.layers.28.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.post_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.pre_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.self_attn.k_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.self_attn.q_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.28.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.input_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.post_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.pre_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.self_attn.k_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", 
"language_model.model.layers.29.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.self_attn.q_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.29.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.input_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.post_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.pre_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.self_attn.k_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.self_attn.q_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.3.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.input_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.post_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.pre_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.self_attn.k_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.self_attn.q_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.30.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.input_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.post_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.pre_feedforward_layernorm.weight": "model-00006-of-00009.safetensors", 
"language_model.model.layers.31.self_attn.k_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.self_attn.q_norm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.31.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.32.input_layernorm.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.32.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "language_model.model.layers.32.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.post_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.pre_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.self_attn.k_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.self_attn.q_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.self_attn.q_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.32.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.input_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.post_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.pre_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.self_attn.k_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.self_attn.q_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.self_attn.q_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.33.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.input_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", 
"language_model.model.layers.4.post_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.pre_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.self_attn.k_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.self_attn.q_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.self_attn.q_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.4.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.input_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.self_attn.k_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.self_attn.q_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.self_attn.q_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.5.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.input_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.post_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.self_attn.k_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.self_attn.q_norm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.self_attn.q_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.6.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.7.input_layernorm.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.7.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.7.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "language_model.model.layers.7.mlp.up_proj.weight": 
"model-00008-of-00009.safetensors", "language_model.model.layers.7.post_attention_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.post_feedforward_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.pre_feedforward_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.self_attn.k_norm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.self_attn.o_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.self_attn.q_norm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.7.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.input_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.mlp.down_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.mlp.gate_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.mlp.up_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.post_attention_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.post_feedforward_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.pre_feedforward_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.self_attn.k_norm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.self_attn.o_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.self_attn.q_norm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.8.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.input_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.mlp.down_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.mlp.gate_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.mlp.up_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.post_attention_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.post_feedforward_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.pre_feedforward_layernorm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.self_attn.k_norm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.self_attn.o_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.self_attn.q_norm.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.layers.9.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "language_model.model.norm.weight": "model-00008-of-00009.safetensors", "multi_modal_projector.mm_input_projection_weight": "model-00008-of-00009.safetensors", 
"multi_modal_projector.mm_soft_emb_norm.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.embeddings.patch_embedding.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00008-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00008-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": 
"model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00008-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00008-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", 
"vision_tower.vision_model.encoder.layers.26.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": 
"model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": 
"model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": 
"model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00009-of-00009.safetensors", "vision_tower.vision_model.post_layernorm.bias": "model-00009-of-00009.safetensors", "vision_tower.vision_model.post_layernorm.weight": "model-00009-of-00009.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<end_of_turn>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
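These special-token definitions are what `transformers` reads back at load time; a quick sketch for confirming they round-trip through the tokenizer (the repo id below is a placeholder assumption — substitute the actual hub id or a local path):

```python
from transformers import AutoTokenizer

# Placeholder id: swap in the published hub id or a local checkout path.
tok = AutoTokenizer.from_pretrained("bunnycore/gemma3")

print(tok.bos_token)  # "<bos>"
print(tok.eos_token)  # "<end_of_turn>"
print(tok.pad_token)  # "<pad>"
print(tok.unk_token)  # "<unk>"

# The full mapping mirrors special_tokens_map.json above.
print(tok.special_tokens_map)
```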
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
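Both `tokenizer.json` and `tokenizer.model` are committed as three-line Git LFS pointers rather than the blobs themselves: a spec version, a `sha256` object id, and the byte size. A small sketch, under those format assumptions, for parsing a pointer and verifying a fetched blob against it:

```python
import hashlib
import os

def parse_lfs_pointer(text: str) -> dict:
    """Split the three 'key value' lines of a Git LFS pointer file."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def matches_pointer(blob_path: str, ptr: dict) -> bool:
    """Check a fetched blob's size and sha256 digest against its pointer."""
    if os.path.getsize(blob_path) != ptr["size"]:
        return False
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == ptr["sha256"]

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c\n"
    "size 4689074"
)
# matches_pointer("tokenizer.model", ptr)  # True once the real blob is present
```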
tokenizer_config.json ADDED
The diff for this file is too large to render.