sharpenb committed
Commit b90d713 · verified · 1 parent: 0256be4

Upload folder using huggingface_hub (#3)

- 3dbf594323d1cc656a082376c520fe8e1da4130f2d31f0fdcdcdc1db73a7f11b (caeb44b2403a2ed0b8533486c508dc06c9dcc9b3)
- 54f6b34a04e47a6c932a99dcbbc5c0d73570f00f2654156b7a4e2bc90814b28d (47f896edd5a8a81634eb0acffeb59f3a5404091d)

Files changed (3)
  1. config.json +21 -1
  2. qmodel.pt +2 -2
  3. smash_config.json +1 -1
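The commit title points at huggingface_hub's upload_folder helper. A minimal sketch of that kind of call, with placeholder repo_id and folder_path (neither is recorded in this commit):

```python
# Sketch of the call the commit title points at. repo_id and
# folder_path are placeholders, not values recorded in the commit.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="your-user/your-model",        # hypothetical target repo
    folder_path="/path/to/local/model",    # folder with config.json, qmodel.pt, smash_config.json
    commit_message="Upload folder using huggingface_hub",
    create_pr=True,                        # the "(#3)" suffix suggests this landed via a pull request
)
```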
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "google/gemma-2-2b",
+  "_name_or_path": "/tmp/models/tmpnthtz7dc/tmp_e9rkptp",
   "architectures": [
     "Gemma2ForCausalLM"
   ],
@@ -23,6 +23,26 @@
   "num_hidden_layers": 26,
   "num_key_value_heads": 4,
   "pad_token_id": 0,
+  "quantization_config": {
+    "quant_config": {
+      "offload_meta": false,
+      "scale_quant_params": null,
+      "weight_quant_params": {
+        "axis": 1,
+        "channel_wise": true,
+        "group_size": 64,
+        "nbits": 4,
+        "optimize": true,
+        "round_zero": true,
+        "view_as_float": false
+      },
+      "zero_quant_params": null
+    },
+    "quant_method": "hqq",
+    "skip_modules": [
+      "lm_head"
+    ]
+  },
   "query_pre_attn_scalar": 256,
   "rms_norm_eps": 1e-06,
   "rope_theta": 10000.0,
qmodel.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d7bd97f932122941f252fccf549f67bd44e9267dd9dd0bcade16a0908abc982
-size 2318999562
+oid sha256:bc3bbb4fc134dd9f425ae38b97b07d97fbf1a3c3293439c420d1a56c0b8440c8
+size 3625639370
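qmodel.pt is stored through Git LFS, so the tracked file is only a pointer; this commit swaps the payload's sha256 and grows it from roughly 2.3 GB to roughly 3.6 GB. A sketch of verifying a downloaded copy against the new pointer:

```python
# Sketch of verifying a downloaded qmodel.pt against the sha256 oid in
# the new LFS pointer. The local file path is a placeholder.
import hashlib

EXPECTED = "bc3bbb4fc134dd9f425ae38b97b07d97fbf1a3c3293439c420d1a56c0b8440c8"

digest = hashlib.sha256()
with open("qmodel.pt", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED, "qmodel.pt does not match the LFS pointer"
```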
smash_config.json CHANGED
@@ -11,7 +11,7 @@
   "quant_hqq_weight_bits": 4,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/tmp/models/tmp28ewnzjn",
+  "cache_dir": "/tmp/models/tmpnthtz7dc",
   "task": "",
   "save_load_fn": "hqq",
   "save_load_fn_args": {},