Upload folder using huggingface_hub
- .gitattributes +1 -0
- added_tokens.json +1 -0
- chat_template.json +1 -0
- config.json +43 -0
- generation_config.json +1 -0
- model-00001-of-00012.safetensors +3 -0
- model-00002-of-00012.safetensors +3 -0
- model-00003-of-00012.safetensors +3 -0
- model-00004-of-00012.safetensors +3 -0
- model-00005-of-00012.safetensors +3 -0
- model-00006-of-00012.safetensors +3 -0
- model-00007-of-00012.safetensors +3 -0
- model-00008-of-00012.safetensors +3 -0
- model-00009-of-00012.safetensors +3 -0
- model-00010-of-00012.safetensors +3 -0
- model-00011-of-00012.safetensors +3 -0
- model-00012-of-00012.safetensors +3 -0
- model.safetensors.index.json +0 -0
- preprocessor_config.json +1 -0
- processor_config.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<image_soft_token>": 262144}
chat_template.json
ADDED
@@ -0,0 +1 @@
+{"chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"}
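The template above wraps each turn in <start_of_turn>/<end_of_turn>, maps the assistant role to "model", folds a system message into the first user turn, and emits <start_of_image> for image items. A minimal sketch of exercising it via transformers' AutoProcessor.apply_chat_template; the repo id is copied from "_name_or_path" in config.json below and is an assumption, not something confirmed to resolve:

from transformers import AutoProcessor

# Repo id taken from config.json's "_name_or_path"; treat as an assumption.
processor = AutoProcessor.from_pretrained("barchetta/barchetta-sn11-1746885175-834292")

messages = [
    {"role": "system", "content": [{"type": "text", "text": "You are a concise assistant."}]},
    {"role": "user", "content": [
        {"type": "image"},                                  # rendered as <start_of_image>
        {"type": "text", "text": "Describe this image."},
    ]},
]

# add_generation_prompt=True triggers the trailing '<start_of_turn>model\n' branch.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)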
config.json
ADDED
@@ -0,0 +1,43 @@
+{
+  "architectures": [
+    "Gemma3ForConditionalGeneration"
+  ],
+  "boi_token_index": 255999,
+  "eoi_token_index": 256000,
+  "eos_token_id": [
+    1,
+    106
+  ],
+  "image_token_index": 262144,
+  "initializer_range": 0.02,
+  "mm_tokens_per_image": 256,
+  "model_type": "gemma3",
+  "text_config": {
+    "head_dim": 128,
+    "hidden_size": 5376,
+    "intermediate_size": 21504,
+    "model_type": "gemma3_text",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 62,
+    "num_key_value_heads": 16,
+    "query_pre_attn_scalar": 168,
+    "rope_scaling": {
+      "factor": 8.0,
+      "rope_type": "linear"
+    },
+    "sliding_window": 1024
+  },
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.50.0.dev0",
+  "vision_config": {
+    "hidden_size": 1152,
+    "image_size": 896,
+    "intermediate_size": 4304,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14,
+    "vision_use_head": false
+  },
+  "_name_or_path": "barchetta/barchetta-sn11-1746885175-834292"
+}
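The config describes a Gemma3 multimodal checkpoint (62-layer, 5376-hidden text tower plus a SigLIP vision tower at 896x896 input) stored in bfloat16 and targeting transformers 4.50. A minimal loading sketch under those assumptions; the repo id comes from "_name_or_path" above, and device_map="auto" additionally assumes accelerate is installed:

import torch
from transformers import AutoProcessor, Gemma3ForConditionalGeneration

model_id = "barchetta/barchetta-sn11-1746885175-834292"  # from "_name_or_path"; assumption
model = Gemma3ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
    device_map="auto",           # requires accelerate; spreads the 12 shards across devices
)
processor = AutoProcessor.from_pretrained(model_id)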
generation_config.json
ADDED
@@ -0,0 +1 @@
+{"bos_token_id": 2, "cache_implementation": "hybrid", "do_sample": true, "eos_token_id": [1, 106], "pad_token_id": 0, "top_k": 64, "top_p": 0.95, "transformers_version": "4.50.0.dev0"}
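These defaults (sampling with top_k=64 and top_p=0.95, eos ids 1 and 106, pad id 0, hybrid cache) are picked up automatically by generate(). A short sketch reusing the model and processor from the loading example above; the prompt text and max_new_tokens value are illustrative:

messages = [{"role": "user", "content": [{"type": "text", "text": "Write a haiku about tokenizers."}]}]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(model.device)

# do_sample/top_k/top_p/eos/pad come from generation_config.json; max_new_tokens is illustrative.
output = model.generate(**inputs, max_new_tokens=128)
print(processor.decode(output[0], skip_special_tokens=True))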
model-00001-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f79c65454930b27708d76ee9e552cf8ed6abf5e82da3024764ff467c1435c80
+size 4854573744
model-00002-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75fc147f950cff71e1d831a8917b2e7c43d5adaf19ce77ee7688b30173387e79
+size 4954792984
model-00003-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9eb4198556be97d36aa4409cfb38c4df92564fab4ccab8c18bc764a6c2345c9
+size 4954793024
model-00004-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:463381d5a438dc2c8cc129488da381769c3f754100cce77e8dd59b474f08343c
+size 4954793064
model-00005-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f0eaa8f41093703523388d235fa8afedbbcdcaa4ba07943456f40e38fc02744
+size 4954793064
model-00006-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9a9be91ef40c4cc3f55cda24e160217eada35a103de7a9e191325ed11f1353a
+size 4954793064
model-00007-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:588b59a73d02470ff8b81b134dfa2a77489447992fb8294516362bd5f091428c
+size 4954793064
model-00008-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffd3016b3ec34b07e8e786777d4145fe1a20e5a4b6288b7188fc5ac4c3c8ac02
+size 4954793064
model-00009-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0611d6d2553d75f3a54785350298183cd7b859174b9ba7b03772448000084fdd
+size 4954793064
model-00010-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:428c5720fbed3d771a82c429045d3a531f94351f297c0b8927dd6ef09368781c
+size 4954793064
model-00011-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:785fbf990f154c4f8a04675a778386ca7979a5d1c1adfc4a0b3cabd09b8f714e
+size 4954793064
model-00012-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51ecc64a12721047bc62fe9f0d2ac46965e1553c0e219ec86dd5f670361a7086
+size 462476736
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
preprocessor_config.json
ADDED
@@ -0,0 +1 @@
+{"do_convert_rgb": null, "do_normalize": true, "do_pan_and_scan": null, "do_rescale": true, "do_resize": true, "image_mean": [0.5, 0.5, 0.5], "image_processor_type": "Gemma3ImageProcessor", "image_seq_length": 256, "image_std": [0.5, 0.5, 0.5], "pan_and_scan_max_num_crops": null, "pan_and_scan_min_crop_size": null, "pan_and_scan_min_ratio_to_activate": null, "processor_class": "Gemma3Processor", "resample": 2, "rescale_factor": 0.00392156862745098, "size": {"height": 896, "width": 896}}
processor_config.json
ADDED
@@ -0,0 +1 @@
+{"image_seq_length": 256, "processor_class": "Gemma3Processor"}
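Together, these two files say the processor resizes images to 896x896, rescales by 1/255, normalizes with mean/std 0.5 (pan-and-scan left unset), and maps each image to 256 soft tokens. A sketch of that image path, reusing the processor object from the earlier examples; the local file name is purely illustrative:

from PIL import Image

image = Image.open("example.jpg")  # illustrative local file, not part of this upload
messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "What is in this picture?"},
]}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

inputs = processor(text=prompt, images=image, return_tensors="pt")
# Per the config above, pixel_values should be 896x896 crops and the tokenized prompt
# should contain a run of 256 <image_soft_token> ids for the image.
print(inputs["pixel_values"].shape)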
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"boi_token": "<start_of_image>", "bos_token": {"content": "<bos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "eoi_token": "<end_of_image>", "eos_token": {"content": "<eos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "image_token": "<image_soft_token>", "pad_token": {"content": "<pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "unk_token": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d99d58e69b40e7c491d5d99acf41871c7355d7d7f61f3f6a8163f08afea471c4
+size 20323111
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
tokenizer_config.json
ADDED
The diff for this file is too large to render.