sharpenb committed
Commit 47f896e · verified · 1 Parent(s): caeb44b

54f6b34a04e47a6c932a99dcbbc5c0d73570f00f2654156b7a4e2bc90814b28d

Files changed (2):
  1. config.json (+21 -1)
  2. smash_config.json (+1 -1)

config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "google/gemma-2-2b",
+  "_name_or_path": "/tmp/models/tmpnthtz7dc/tmp_e9rkptp",
   "architectures": [
     "Gemma2ForCausalLM"
   ],
@@ -23,6 +23,26 @@
   "num_hidden_layers": 26,
   "num_key_value_heads": 4,
   "pad_token_id": 0,
+  "quantization_config": {
+    "quant_config": {
+      "offload_meta": false,
+      "scale_quant_params": null,
+      "weight_quant_params": {
+        "axis": 1,
+        "channel_wise": true,
+        "group_size": 64,
+        "nbits": 4,
+        "optimize": true,
+        "round_zero": true,
+        "view_as_float": false
+      },
+      "zero_quant_params": null
+    },
+    "quant_method": "hqq",
+    "skip_modules": [
+      "lm_head"
+    ]
+  },
   "query_pre_attn_scalar": 256,
   "rms_norm_eps": 1e-06,
   "rope_theta": 10000.0,
smash_config.json CHANGED
@@ -11,7 +11,7 @@
   "quant_hqq_weight_bits": 4,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/tmp/models/tmp28ewnzjn",
+  "cache_dir": "/tmp/models/tmpnthtz7dc",
   "task": "",
   "save_load_fn": "hqq",
   "save_load_fn_args": {},