Re-upload of GPTQ model due to issue with base model
quantize_config.json  (CHANGED, +2 -2)
@@ -1,8 +1,8 @@
 {
   "bits": 4,
-  "group_size":
+  "group_size": 128,
   "damp_percent": 0.01,
-  "desc_act":
+  "desc_act": false,
   "sym": true,
   "true_sequential": true
 }
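For reference, a minimal sketch (not the uploader's own script) of how the fields in the corrected quantize_config.json map onto auto-gptq's BaseQuantizeConfig, and how the re-uploaded model would be loaded back; the model path below is a placeholder.

```python
# Sketch only: maps quantize_config.json fields to auto-gptq's BaseQuantizeConfig.
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

quantize_config = BaseQuantizeConfig(
    bits=4,                # 4-bit weight quantization
    group_size=128,        # quantization group size (the corrected value)
    damp_percent=0.01,     # dampening used when computing the Hessian inverse
    desc_act=False,        # activation-order quantization disabled, per the new config
    sym=True,              # symmetric quantization
    true_sequential=True,  # quantize layers sequentially inside each block
)

# Loading the re-uploaded GPTQ model; from_quantized reads quantize_config.json
# from the model directory, so the values above are shown only for reference.
model = AutoGPTQForCausalLM.from_quantized(
    "path/to/this-gptq-model",  # placeholder: repo id or local directory
    device="cuda:0",
    use_safetensors=True,
)
```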