Add files using upload-large-folder tool

Files changed:
- config.json +67 -52
- generation_config.json +1 -1
- model-00001-of-00005.safetensors +3 -0
- model-00002-of-00005.safetensors +3 -0
- model-00003-of-00005.safetensors +3 -0
- model-00004-of-00005.safetensors +3 -0
- model-00005-of-00005.safetensors +3 -0
- model.safetensors.index.json +0 -0
- tokenizer_config.json +1 -1
config.json
CHANGED
@@ -23,56 +23,71 @@
       "multi_modal_projector",
       "merger",
       "modality_projection",
-      [50 removed entries: the old "llm_int8_skip_modules" names, truncated in the page extraction to fragments like "language_model.…" and "vision_tower.transformer.layers.21.…"]
+      "model.language_model.layers.10.mlp",
+      "model.language_model.layers.8.self_attn",
+      "model.language_model.layers.4.mlp",
+      "model.language_model.layers.6.mlp",
+      "model.language_model.layers.8.mlp",
+      "model.language_model.layers.9.mlp",
+      "model.language_model.layers.5.mlp",
+      "model.language_model.layers.3.self_attn",
+      "model.language_model.layers.4.self_attn",
+      "model.language_model.layers.6.self_attn",
+      "model.language_model.layers.7.mlp",
+      "model.language_model.layers.1.self_attn",
+      "model.vision_tower.transformer.layers.20.attention",
+      "model.language_model.layers.2.self_attn",
+      "model.vision_tower.transformer.layers.19.attention",
+      "model.language_model.layers.7.self_attn",
+      "model.language_model.layers.5.self_attn",
+      "model.vision_tower.transformer.layers.18.attention",
+      "model.vision_tower.transformer.layers.17.attention",
+      "model.vision_tower.transformer.layers.20.feed_forward",
+      "model.vision_tower.transformer.layers.21.attention",
+      "model.vision_tower.transformer.layers.16.attention",
+      "model.vision_tower.transformer.layers.21.feed_forward",
+      "model.vision_tower.transformer.layers.15.attention",
+      "model.vision_tower.transformer.layers.22.feed_forward",
+      "model.multi_modal_projector",
+      "model.vision_tower.transformer.layers.18.feed_forward",
+      "model.vision_tower.transformer.layers.16.feed_forward",
+      "model.vision_tower.transformer.layers.15.feed_forward",
+      "model.language_model.layers.2.mlp",
+      "model.vision_tower.transformer.layers.19.feed_forward",
+      "model.vision_tower.transformer.layers.14.attention",
+      "model.vision_tower.transformer.layers.8.feed_forward",
+      "model.vision_tower.transformer.layers.12.feed_forward",
+      "model.vision_tower.transformer.layers.14.feed_forward",
+      "model.vision_tower.transformer.layers.7.feed_forward",
+      "model.vision_tower.transformer.layers.5.feed_forward",
+      "model.vision_tower.transformer.layers.4.feed_forward",
+      "model.vision_tower.transformer.layers.6.feed_forward",
+      "model.vision_tower.transformer.layers.23.attention",
+      "model.vision_tower.transformer.layers.17.feed_forward",
+      "model.vision_tower.transformer.layers.13.attention",
+      "model.vision_tower.transformer.layers.11.feed_forward",
+      "model.vision_tower.transformer.layers.22.attention",
+      "model.vision_tower.transformer.layers.3.feed_forward",
+      "model.vision_tower.transformer.layers.10.feed_forward",
+      "model.vision_tower.transformer.layers.9.feed_forward",
+      "model.vision_tower.transformer.layers.13.feed_forward",
+      "model.vision_tower.transformer.layers.12.attention",
+      "model.vision_tower.transformer.layers.23.feed_forward",
+      "model.vision_tower.transformer.layers.11.attention",
+      "model.vision_tower.transformer.layers.10.attention",
+      "model.vision_tower.transformer.layers.8.attention",
+      "model.vision_tower.transformer.layers.2.feed_forward",
+      "model.vision_tower.transformer.layers.5.attention",
+      "model.vision_tower.transformer.layers.0.feed_forward",
+      "model.vision_tower.transformer.layers.1.feed_forward",
+      "model.vision_tower.transformer.layers.4.attention",
+      "model.vision_tower.transformer.layers.7.attention",
+      "model.vision_tower.transformer.layers.6.attention",
+      "model.vision_tower.transformer.layers.0.attention",
+      "model.vision_tower.transformer.layers.3.attention",
+      "model.vision_tower.transformer.layers.1.attention",
+      "model.vision_tower.transformer.layers.9.attention",
+      "model.vision_tower.transformer.layers.2.attention"
     ],
     "llm_int8_threshold": 6.0,
     "load_in_4bit": true,
@@ -100,12 +115,12 @@
     "vocab_size": 131072
   },
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.…",
+  "transformers_version": "4.52.3",
   "unsloth_fixed": true,
   "vision_config": {
     "attention_dropout": 0.0,
     "head_dim": 64,
-    "hidden_act": "…",
+    "hidden_act": "silu",
     "hidden_size": 1024,
     "image_size": 1540,
     "initializer_range": 0.02,
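The block above is the `quantization_config` of a bitsandbytes 4-bit checkpoint: every module listed in `llm_int8_skip_modules` is left unquantized in higher precision, and the rewrite adds the `model.` prefix to the module paths, consistent with the `transformers_version` bump to 4.52.3 further down. As a minimal sketch of the same settings built by hand — assuming the transformers and bitsandbytes packages, a placeholder repo id `"your-org/your-model"`, and with the skip list abbreviated to a few entries from the diff:

```python
import torch
from transformers import AutoModelForImageTextToText, BitsAndBytesConfig

# Mirrors the quantization_config embedded in config.json above;
# the full llm_int8_skip_modules list is abbreviated here.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    bnb_4bit_compute_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
    llm_int8_skip_modules=[
        "multi_modal_projector",
        "merger",
        "modality_projection",
        "model.language_model.layers.10.mlp",
        "model.vision_tower.transformer.layers.20.attention",
        # ...remaining entries from the config.json diff...
    ],
)

# "your-org/your-model" is a placeholder. Since config.json already ships this
# quantization_config, plain from_pretrained would pick it up on its own;
# passing it explicitly just makes the settings visible in the example.
model = AutoModelForImageTextToText.from_pretrained(
    "your-org/your-model",
    quantization_config=bnb_config,
    device_map="auto",
)
```

Keeping the projector and the most sensitive attention/MLP blocks out of 4-bit this way appears to be the usual dynamic-quantization trade: a small amount of extra memory for noticeably less quantization error.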
generation_config.json
CHANGED
@@ -3,5 +3,5 @@
   "bos_token_id": 1,
   "eos_token_id": 2,
   "pad_token_id": 11,
-  "transformers_version": "4.…"
+  "transformers_version": "4.52.3"
 }
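These ids pin the special tokens used at generation time. A minimal sketch of reading them back through the standard transformers API, reusing the placeholder repo id from the sketch above:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("your-org/your-model")  # placeholder id
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # -> 1 2 11
```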
model-00001-of-00005.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b8e786bb48255b88409fa752312a4e3f535755b88ac6ddd665b05b179f9778b
+size 4787827762

model-00002-of-00005.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f06765d75d85b016ac8c483b8f299e54482c611448ffcf9e2ffed489ac1c74da
+size 4781593336

model-00003-of-00005.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c387a5b535f3be869604cc76bbf46e39c81f3e279269ba19b5a11514ae0bc3dd
+size 4999760552

model-00004-of-00005.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4ff098c12f3ab27cf0940fb25db799b557c996f49a08d44a643e5f18d1776ca
+size 4971731423

model-00005-of-00005.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0265677a5cae4750e80bd318b93744b34b433954ea605e54a3ced02e580b18af
+size 2088763690
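Each of the five shards above is committed as a Git LFS pointer: the repo tracks only the spec version, a sha256 oid, and the byte size, while the weights themselves live in LFS storage. A minimal standard-library sketch for checking a downloaded shard against its pointer (the local path is a placeholder for wherever your download landed):

```python
import hashlib
from pathlib import Path

# Pointer values for model-00001-of-00005.safetensors, copied from above.
EXPECTED_OID = "1b8e786bb48255b88409fa752312a4e3f535755b88ac6ddd665b05b179f9778b"
EXPECTED_SIZE = 4_787_827_762

path = Path("model-00001-of-00005.safetensors")  # placeholder local path

# Cheap check first: the byte count must match the pointer's "size" field.
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"

# Then hash the file in 1 MiB chunks and compare against the "oid" field.
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("shard matches its LFS pointer")
```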
model.safetensors.index.json
CHANGED
The diff for this file is too large to render; see the raw diff.
tokenizer_config.json
CHANGED
@@ -9018,4 +9018,4 @@
   "tokenizer_class": "LlamaTokenizerFast",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
-}
+}