lmmy committed on
Commit 55d02b0 · verified · 1 Parent(s): 73dd044

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,35 @@
+ ---
+ base_model: gg-hf-gm/gemma-3-270m-it
+ license: gemma
+ tags:
+ - gemma3
+ - gemma
+ - google
+ - mlx
+ pipeline_tag: text-generation
+ library_name: transformers
+ extra_gated_heading: Access Gemma on Hugging Face
+ extra_gated_prompt: To access Gemma on Hugging Face, you’re required to review and
+ agree to Google’s usage license. To do this, please ensure you’re logged in to Hugging
+ Face and click below. Requests are processed immediately.
+ extra_gated_button_content: Acknowledge license
+ ---
+ ## 💫 Community Model> gemma-3-270m-it by gg-hf-gm
+
+ _👾 [LM Studio](https://lmstudio.ai) Community models highlights program. Highlighting new & noteworthy models by the community. Join the conversation on [Discord](https://discord.gg/aPQfnNkxGC)_.
+
+ **Model creator**: [gg-hf-gm](https://huggingface.co/gg-hf-gm)<br>
+ **Original model**: [gemma-3-270m-it](https://huggingface.co/gg-hf-gm/gemma-3-270m-it)<br>
+ **MLX quantization**: provided by [LM Studio team](https://x.com/lmstudio) using [mlx_lm](https://github.com/ml-explore/mlx-lm)<br>
+
+ ## Technical Details
+
+ Original bfloat16 version of gemma-3-270m-it using MLX, optimized for Apple Silicon.
+
+ ## Special thanks
+
+ 🙏 Special thanks to the [Apple Machine Learning Research](https://github.com/ml-explore) team for creating [MLX](https://github.com/ml-explore/mlx).
+
+ ## Disclaimers
+
+ LM Studio is not the creator, originator, or owner of any Model featured in the Community Model Program. Each Community Model is created and provided by third parties. LM Studio does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any Community Model. You understand that Community Models can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. Each Community Model is the sole responsibility of the person or entity who originated such Model. LM Studio may not monitor or control the Community Models and cannot, and does not, take responsibility for any such Model. LM Studio disclaims all warranties or guarantees about the accuracy, reliability or benefits of the Community Models. LM Studio further disclaims any warranty that the Community Model will meet your requirements, be secure, uninterrupted or available at any time or location, or error-free, viruses-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to the Community Models, your downloading of any Community Model, or use of any other Community Model provided by or through LM Studio.
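A minimal usage sketch for the MLX build described in the README above. The local path is a placeholder, and the `mlx_lm` calls assume a recent `mlx-lm` release; treat this as illustrative rather than the project's documented workflow.

```python
# Sketch: load the bf16 MLX build and run one chat turn with mlx_lm.
# Assumptions: `pip install mlx-lm` on Apple Silicon; "path/to/gemma-3-270m-it-MLX"
# is a placeholder for wherever this repository is downloaded or cloned.
from mlx_lm import load, generate

model, tokenizer = load("path/to/gemma-3-270m-it-MLX")

messages = [{"role": "user", "content": "Summarize what MLX is in one sentence."}]
# Render the Gemma turn markers via the bundled chat template.
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)

print(generate(model, tokenizer, prompt=prompt, max_tokens=128))
```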
chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+ {%- if messages[0]['content'] is string -%}
+ {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+ {%- else -%}
+ {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+ {%- endif -%}
+ {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+ {%- set first_user_prefix = "" -%}
+ {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+ {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+ {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+ {%- endif -%}
+ {%- if (message['role'] == 'assistant') -%}
+ {%- set role = "model" -%}
+ {%- else -%}
+ {%- set role = message['role'] -%}
+ {%- endif -%}
+ {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+ {%- if message['content'] is string -%}
+ {{ message['content'] | trim }}
+ {%- elif message['content'] is iterable -%}
+ {%- for item in message['content'] -%}
+ {%- if item['type'] == 'image' -%}
+ {{ '<start_of_image>' }}
+ {%- elif item['type'] == 'text' -%}
+ {{ item['text'] | trim }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{ raise_exception("Invalid content type") }}
+ {%- endif -%}
+ {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
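To make the turn structure above concrete, here is a small sketch that renders the template directly with jinja2. It is an approximation: transformers applies the template with slightly different whitespace handling, so output may differ marginally.

```python
# Sketch: render chat_template.jinja standalone to inspect the Gemma 3 turn format.
# Assumes jinja2 is installed and the template file sits in the working directory.
from jinja2 import Environment, FileSystemLoader

def raise_exception(message):
    # The template calls raise_exception() on malformed conversations.
    raise ValueError(message)

env = Environment(loader=FileSystemLoader("."))
env.globals["raise_exception"] = raise_exception
template = env.get_template("chat_template.jinja")

print(template.render(
    bos_token="<bos>",
    messages=[
        {"role": "system", "content": "Answer briefly."},
        {"role": "user", "content": "Hello!"},
    ],
    add_generation_prompt=True,
))
# Expected shape: the system text is folded into the first user turn, e.g.
# <bos><start_of_turn>user\nAnswer briefly.\n\nHello!<end_of_turn>\n<start_of_turn>model\n
```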
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "_sliding_window_pattern": 6,
+ "architectures": [
+ "Gemma3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "attn_logit_softcapping": null,
+ "bos_token_id": 2,
+ "eos_token_id": 1,
+ "final_logit_softcapping": null,
+ "head_dim": 256,
+ "hidden_activation": "gelu_pytorch_tanh",
+ "hidden_size": 640,
+ "initializer_range": 0.02,
+ "intermediate_size": 2048,
+ "layer_types": [
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "full_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "full_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "sliding_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "model_type": "gemma3_text",
+ "num_attention_heads": 4,
+ "num_hidden_layers": 18,
+ "num_key_value_heads": 1,
+ "pad_token_id": 0,
+ "query_pre_attn_scalar": 256,
+ "rms_norm_eps": 1e-06,
+ "rope_local_base_freq": 10000.0,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 512,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.55.0.dev0",
+ "use_bidirectional_attention": false,
+ "use_cache": true,
+ "vocab_size": 262144
+ }
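The `layer_types` list above is just the expansion of `_sliding_window_pattern: 6` over the 18 layers: every sixth layer uses full attention, the rest use a 512-token sliding window. A quick, purely illustrative sanity check:

```python
# Sketch: reproduce layer_types from _sliding_window_pattern and num_hidden_layers
# as given in config.json.
pattern, num_hidden_layers = 6, 18
layer_types = [
    "full_attention" if (i + 1) % pattern == 0 else "sliding_attention"
    for i in range(num_hidden_layers)
]
# Full attention falls on layers 5, 11 and 17 (0-indexed), matching the config.
assert [i for i, t in enumerate(layer_types) if t == "full_attention"] == [5, 11, 17]
```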
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "cache_implementation": "hybrid",
+ "do_sample": true,
+ "eos_token_id": [
+ 1,
+ 106
+ ],
+ "top_k": 64,
+ "top_p": 0.95,
+ "transformers_version": "4.55.0.dev0"
+ }
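transformers picks these defaults up automatically when `generate()` is called; the same settings spelled out as an object, for clarity. The reading of id 106 as `<end_of_turn>` is an assumption based on the Gemma 3 tokenizer.

```python
# Sketch: the sampling defaults above expressed as a transformers GenerationConfig.
# eos_token_id lists both <eos> (1) and, assumed, <end_of_turn> (106) so generation
# stops at the end of a chat turn as well as at the document-level EOS.
from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    do_sample=True,
    top_k=64,
    top_p=0.95,
    eos_token_id=[1, 106],
    cache_implementation="hybrid",
)
```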
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e2acba196efd538420b12060013cab1b11344fb8ebfd6a1c5813cafc2f79162
+ size 871767174
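The entry above is a Git LFS pointer rather than the weights themselves; once the real file is downloaded it can be checked against the recorded oid. A small sketch (the local filename is assumed to be the default `model.safetensors`):

```python
# Sketch: verify a downloaded LFS object against the sha256 oid in its pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

expected = "0e2acba196efd538420b12060013cab1b11344fb8ebfd6a1c5813cafc2f79162"
assert sha256_of("model.safetensors") == expected
```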
model.safetensors.index.json ADDED
@@ -0,0 +1,245 @@
+ {
+ "metadata": {
+ "total_size": 871740672,
+ "total_parameters": 435870336
+ },
+ "weight_map": {
+ "lm_head.weight": "model.safetensors",
+ "model.embed_tokens.weight": "model.safetensors",
+ "model.layers.0.input_layernorm.weight": "model.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.0.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.0.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.0.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.0.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.1.input_layernorm.weight": "model.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.1.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.1.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.1.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.1.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.10.input_layernorm.weight": "model.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.10.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.10.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.10.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.10.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.11.input_layernorm.weight": "model.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.11.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.11.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.11.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.11.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.12.input_layernorm.weight": "model.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.12.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.12.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.12.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.12.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.13.input_layernorm.weight": "model.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.13.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.13.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.13.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.13.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.14.input_layernorm.weight": "model.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.14.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.14.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.14.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.14.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.15.input_layernorm.weight": "model.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.15.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.15.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.15.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.15.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.16.input_layernorm.weight": "model.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.16.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.16.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.16.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.16.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.17.input_layernorm.weight": "model.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.17.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.17.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.17.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.17.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.2.input_layernorm.weight": "model.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.2.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.2.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.2.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.2.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.3.input_layernorm.weight": "model.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.3.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.3.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.3.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.3.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.4.input_layernorm.weight": "model.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.4.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.4.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.4.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.4.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.5.input_layernorm.weight": "model.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.5.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.5.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.5.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.5.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.6.input_layernorm.weight": "model.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.6.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.6.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.6.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.6.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.7.input_layernorm.weight": "model.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.7.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.7.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.7.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.7.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.8.input_layernorm.weight": "model.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.8.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.8.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.8.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.8.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model.safetensors",
+ "model.layers.9.input_layernorm.weight": "model.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model.safetensors",
+ "model.layers.9.post_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.9.pre_feedforward_layernorm.weight": "model.safetensors",
+ "model.layers.9.self_attn.k_norm.weight": "model.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model.safetensors",
+ "model.layers.9.self_attn.q_norm.weight": "model.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model.safetensors",
+ "model.norm.weight": "model.safetensors"
+ }
+ }
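Every tensor in the weight map above points at the single `model.safetensors` shard; the index mainly carries the metadata totals. A brief sketch for inspecting it:

```python
# Sketch: confirm the single-shard layout and read the totals from the index.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

assert set(index["weight_map"].values()) == {"model.safetensors"}
print(index["metadata"]["total_parameters"])  # 435870336
print(index["metadata"]["total_size"])        # 871740672 bytes (bf16, 2 bytes/param)
```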
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "boi_token": "<start_of_image>",
+ "bos_token": {
+ "content": "<bos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eoi_token": "<end_of_image>",
+ "eos_token": {
+ "content": "<eos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "image_token": "<image_soft_token>",
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff