Upload folder using huggingface_hub
- .gitattributes +1 -0
- added_tokens.json +1 -0
- chat_template.json +1 -0
- config.json +43 -0
- generation_config.json +1 -0
- model-00001-of-00012.safetensors +3 -0
- model-00002-of-00012.safetensors +3 -0
- model-00003-of-00012.safetensors +3 -0
- model-00004-of-00012.safetensors +3 -0
- model-00005-of-00012.safetensors +3 -0
- model-00006-of-00012.safetensors +3 -0
- model-00007-of-00012.safetensors +3 -0
- model-00008-of-00012.safetensors +3 -0
- model-00009-of-00012.safetensors +3 -0
- model-00010-of-00012.safetensors +3 -0
- model-00011-of-00012.safetensors +3 -0
- model-00012-of-00012.safetensors +3 -0
- model.safetensors.index.json +0 -0
- preprocessor_config.json +1 -0
- processor_config.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
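The commit above was created with huggingface_hub's folder upload. A minimal sketch of how such a commit is produced, assuming the repository id from config.json's `_name_or_path` and a hypothetical local folder path:

```python
# Minimal sketch: creating a commit like this one with huggingface_hub.
# The repo id (taken from "_name_or_path" in config.json) and the local
# folder path are assumptions; substitute your own values.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from HF_TOKEN or the cached login
api.upload_folder(
    folder_path="./barchetta-sn11",  # directory containing the files listed above
    repo_id="barchetta/barchetta-sn11-1746885175-833341",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```

Files matched by the LFS rules in .gitattributes (the *.safetensors shards, and tokenizer.json after this commit) are stored as LFS pointers rather than raw blobs.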
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<image_soft_token>": 262144}
chat_template.json
ADDED
@@ -0,0 +1 @@
+{"chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n    {%- if messages[0]['content'] is string -%}\n        {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n    {%- else -%}\n        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n    {%- endif -%}\n    {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n    {%- set first_user_prefix = \"\" -%}\n    {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n        {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n    {%- endif -%}\n    {%- if (message['role'] == 'assistant') -%}\n        {%- set role = \"model\" -%}\n    {%- else -%}\n        {%- set role = message['role'] -%}\n    {%- endif -%}\n    {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n    {%- if message['content'] is string -%}\n        {{ message['content'] | trim }}\n    {%- elif message['content'] is iterable -%}\n        {%- for item in message['content'] -%}\n            {%- if item['type'] == 'image' -%}\n                {{ '<start_of_image>' }}\n            {%- elif item['type'] == 'text' -%}\n                {{ item['text'] | trim }}\n            {%- endif -%}\n        {%- endfor -%}\n    {%- else -%}\n        {{ raise_exception(\"Invalid content type\") }}\n    {%- endif -%}\n    {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n    {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"}
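The template prepends any system text to the first user turn, maps the assistant role to `model`, wraps turns in `<start_of_turn>`/`<end_of_turn>`, and replaces image items with `<start_of_image>`. A sketch of rendering it through the processor (repo id assumed from config.json):

```python
# Sketch: rendering the chat template above. The repo id is an assumption.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("barchetta/barchetta-sn11-1746885175-833341")
messages = [
    {"role": "system", "content": [{"type": "text", "text": "You are a concise assistant."}]},
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this picture."},
    ]},
]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Per the template, the system text is folded into the first user turn, the image
# item becomes <start_of_image>, and the string ends with "<start_of_turn>model\n"
# because add_generation_prompt=True.
```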
config.json
ADDED
@@ -0,0 +1,43 @@
+{
+  "architectures": [
+    "Gemma3ForConditionalGeneration"
+  ],
+  "boi_token_index": 255999,
+  "eoi_token_index": 256000,
+  "eos_token_id": [
+    1,
+    106
+  ],
+  "image_token_index": 262144,
+  "initializer_range": 0.02,
+  "mm_tokens_per_image": 256,
+  "model_type": "gemma3",
+  "text_config": {
+    "head_dim": 128,
+    "hidden_size": 5376,
+    "intermediate_size": 21504,
+    "model_type": "gemma3_text",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 62,
+    "num_key_value_heads": 16,
+    "query_pre_attn_scalar": 168,
+    "rope_scaling": {
+      "factor": 8.0,
+      "rope_type": "linear"
+    },
+    "sliding_window": 1024
+  },
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.50.0.dev0",
+  "vision_config": {
+    "hidden_size": 1152,
+    "image_size": 896,
+    "intermediate_size": 4304,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14,
+    "vision_use_head": false
+  },
+  "_name_or_path": "barchetta/barchetta-sn11-1746885175-833341"
+}
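The config describes a gemma3 text decoder (62 layers, hidden size 5376) paired with a SigLIP vision tower, saved in bfloat16 by transformers 4.50.0.dev0. A loading sketch, assuming a transformers build that ships Gemma3 support and the repo id from `_name_or_path`:

```python
# Sketch: loading the checkpoint described by config.json.
# Assumes transformers >= 4.50 (the config was written by 4.50.0.dev0);
# the repo id comes from "_name_or_path" and is an assumption.
import torch
from transformers import AutoProcessor, Gemma3ForConditionalGeneration

repo_id = "barchetta/barchetta-sn11-1746885175-833341"
model = Gemma3ForConditionalGeneration.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in the config
    device_map="auto",           # spreads the ~55 GB of bf16 shards across available devices
)
processor = AutoProcessor.from_pretrained(repo_id)
```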
generation_config.json
ADDED
@@ -0,0 +1 @@
+{"bos_token_id": 2, "cache_implementation": "hybrid", "do_sample": true, "eos_token_id": [1, 106], "pad_token_id": 0, "top_k": 64, "top_p": 0.95, "transformers_version": "4.50.0.dev0"}
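Sampling is enabled by default (top_k=64, top_p=0.95, hybrid KV cache). A text-only generation sketch continuing from the loading sketch above; the explicit sampling arguments only restate this file's defaults:

```python
# Sketch: generation with the defaults from generation_config.json.
# Continues the loading sketch above; the prompt text is arbitrary.
messages = [{"role": "user", "content": [{"type": "text", "text": "Name one use of LFS pointers."}]}]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,  # these three mirror generation_config.json and would also
    top_k=64,        # be applied automatically if omitted
    top_p=0.95,
)
print(processor.decode(output_ids[0], skip_special_tokens=True))
```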
model-00001-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e2d286e8d8a52b059607d076b171f5ce818c8c14ea86f3704ad3e250c383a91
+size 4854573744
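Each shard is stored as a Git LFS pointer: a version line, a sha256 oid, and the byte size. A small sketch for checking a downloaded shard against the pointer above (local path assumed):

```python
# Sketch: verifying a downloaded shard against its LFS pointer
# (oid and size copied from model-00001-of-00012.safetensors above).
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB shards don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = "model-00001-of-00012.safetensors"  # local copy; path is an assumption
assert os.path.getsize(path) == 4854573744, "size mismatch"
assert sha256_of(path) == "9e2d286e8d8a52b059607d076b171f5ce818c8c14ea86f3704ad3e250c383a91", "sha256 mismatch"
```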
model-00002-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09ed5e6139da89bcedd2b25501ca8dbaa5bd98410e35a54ff8090ebb0652c73a
+size 4954792984
model-00003-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23a1ce8bc56af213421be5392d78b989922245320ff9a9a173ecaf293013adac
+size 4954793024
model-00004-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68cb8bb97ae42b28a86e455c4e789ccb8ee93410b96081e6be029be0e978af1d
+size 4954793064
model-00005-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e0bb081d0a21f94297b9b4381fb836bd2b72d5ce8e1cb701fc112a1fb62aa0b
+size 4954793064
model-00006-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:550022183c776911d8c70753424f72545dbed48025bd4606a78476e3dab55ba7
+size 4954793064
model-00007-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8b3a8fe43b7da069356cb981635c6ed03c33e5711a97b6790589dfaba8e9de1
+size 4954793064
model-00008-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4a4d0b1bc089ee1e083f97caa6b6d5daf2ae5596f6c2fb54b4e0e7f65d647cd
+size 4954793064
model-00009-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b839ee47428dc87cf9b0f6823ffac78f22c91df2e1a9d9fe6d889e979d0e999
+size 4954793064
model-00010-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09caa056ab2f78f45a3995678edbf35ae0e0562bf84b2ede8893488bac2cebd4
+size 4954793064
model-00011-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bb4a5c2a593308b3a9e41e1ce96fba8a0cfff0c4af1c4f7a82ed1992a01983d
+size 4954793064
model-00012-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27b51eaa5c103254e62c74436676c1925ebd84dfbbc9b4ec0eec0549beb18c37
+size 462476736
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
See raw diff
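The index file (too large to render here) maps each parameter name to the shard that stores it. A sketch of inspecting it, assuming the standard sharded-checkpoint layout of a "metadata" block plus a "weight_map":

```python
# Sketch: inspecting model.safetensors.index.json (not rendered in this diff).
# Assumes the standard layout: {"metadata": {...}, "weight_map": {name: shard}}.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"].get("total_size"))         # total bytes across the 12 shards
per_shard = Counter(index["weight_map"].values())  # tensor count per shard file
for shard, n_tensors in sorted(per_shard.items()):
    print(f"{shard}: {n_tensors} tensors")
```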
preprocessor_config.json
ADDED
@@ -0,0 +1 @@
+{"do_convert_rgb": null, "do_normalize": true, "do_pan_and_scan": null, "do_rescale": true, "do_resize": true, "image_mean": [0.5, 0.5, 0.5], "image_processor_type": "Gemma3ImageProcessor", "image_seq_length": 256, "image_std": [0.5, 0.5, 0.5], "pan_and_scan_max_num_crops": null, "pan_and_scan_min_crop_size": null, "pan_and_scan_min_ratio_to_activate": null, "processor_class": "Gemma3Processor", "resample": 2, "rescale_factor": 0.00392156862745098, "size": {"height": 896, "width": 896}}
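Images are resized to 896x896, rescaled by 1/255, and normalized with per-channel mean and std of 0.5, yielding 256 image tokens per image. A usage sketch (repo id and image path assumed):

```python
# Sketch: the preprocessing described by preprocessor_config.json.
# Repo id and image path are assumptions.
from PIL import Image
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("barchetta/barchetta-sn11-1746885175-833341")
image = Image.open("example.jpg").convert("RGB")
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected (1, 3, 896, 896) per the "size" field
```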
processor_config.json
ADDED
@@ -0,0 +1 @@
+{"image_seq_length": 256, "processor_class": "Gemma3Processor"}
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"boi_token": "<start_of_image>", "bos_token": {"content": "<bos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "eoi_token": "<end_of_image>", "eos_token": {"content": "<eos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "image_token": "<image_soft_token>", "pad_token": {"content": "<pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "unk_token": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}}
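The image token named here should agree with added_tokens.json (`<image_soft_token>` = 262144) and with the boi/eoi/image indices in config.json. A quick consistency check, with the repo id assumed and the expected ids taken from config.json:

```python
# Sketch: cross-checking special-token ids against config.json
# (image_token_index=262144, boi_token_index=255999, eoi_token_index=256000).
# Repo id is an assumption.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("barchetta/barchetta-sn11-1746885175-833341")
assert tok.convert_tokens_to_ids("<image_soft_token>") == 262144
assert tok.convert_tokens_to_ids("<start_of_image>") == 255999
assert tok.convert_tokens_to_ids("<end_of_image>") == 256000
```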
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d99d58e69b40e7c491d5d99acf41871c7355d7d7f61f3f6a8163f08afea471c4
+size 20323111
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
tokenizer_config.json
ADDED
The diff for this file is too large to render.
See raw diff