Update config.json
config.json (+3 -3)
@@ -10,7 +10,7 @@
   "projector_hidden_act": "gelu",
   "quantization_config": {
     "amp": true,
-    "autoround_version": "0.
+    "autoround_version": "0.5.1",
     "batch_size": 8,
     "bits": 4,
     "data_type": "int",
@@ -29,7 +29,7 @@
     "scale_dtype": "torch.float16",
     "seqlen": 2048,
     "sym": true,
-    "block_name_to_quantize": "language_model.model.layers"
+    "block_name_to_quantize": "language_model.model.layers,model.language_model.layers"
   },
   "text_config": {
     "_name_or_path": "unsloth/Meta-Llama-3.1-8B-Instruct",
@@ -61,7 +61,7 @@
     "vocab_size": 128256
   },
   "torch_dtype": "float16",
-  "transformers_version": "4.
+  "transformers_version": "4.52.2",
   "vision_config": {
     "_name_or_path": "google/siglip-so400m-patch14-384",
     "architectures": [
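The `block_name_to_quantize` change is the functional part of this commit: alongside the bump of `transformers_version` to 4.52.2, the value becomes a comma-separated pair, which appears to track the transformers 4.52 refactor that moved the decoder stack of multimodal models from `language_model.model.layers` to `model.language_model.layers`. Listing both paths lets the quantization config match either module layout. As a minimal sketch of how a loader could resolve such a list (the `resolve_block_name` helper below is hypothetical, not AutoRound's or transformers' actual code):

import json

# Minimal sketch: read this repo's config.json locally and resolve the
# comma-separated block_name_to_quantize against whichever module layout
# the loaded model actually uses.
with open("config.json") as f:
    cfg = json.load(f)

candidates = cfg["quantization_config"]["block_name_to_quantize"].split(",")
# -> ['language_model.model.layers', 'model.language_model.layers']

def resolve_block_name(model, names):
    """Return the first dotted attribute path that exists on `model` (hypothetical helper)."""
    for name in names:
        module = model
        try:
            for attr in name.split("."):
                module = getattr(module, attr)
        except AttributeError:
            continue
        return name
    raise ValueError(f"none of {names} found on the model")

On a pre-4.52 model object the first entry resolves; on 4.52+ the second does, so the same quantized checkpoint stays loadable across the refactor, which is presumably the point of shipping both paths.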