xzuyn committed
Commit c19cdfe · verified · Parent: 2e748ed

Upload 4bit bnb double quant model

Files changed (3):
  1. config.json +77 -0
  2. generation_config.json +13 -0
  3. model.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,77 @@
+ {
+   "architectures": [
+     "Gemma3ForConditionalGeneration"
+   ],
+   "boi_token_index": 255999,
+   "eoi_token_index": 256000,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "image_token_index": 262144,
+   "initializer_range": 0.02,
+   "mm_tokens_per_image": 256,
+   "model_type": "gemma3",
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "bfloat16",
+     "bnb_4bit_quant_storage": "bfloat16",
+     "bnb_4bit_quant_type": "nf4",
+     "bnb_4bit_use_double_quant": true,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "text_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "attn_logit_softcapping": null,
+     "cache_implementation": "hybrid",
+     "final_logit_softcapping": null,
+     "head_dim": 256,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 2560,
+     "initializer_range": 0.02,
+     "intermediate_size": 10240,
+     "max_position_embeddings": 131072,
+     "model_type": "gemma3_text",
+     "num_attention_heads": 8,
+     "num_hidden_layers": 34,
+     "num_key_value_heads": 4,
+     "query_pre_attn_scalar": 256,
+     "rms_norm_eps": 1e-06,
+     "rope_local_base_freq": 10000.0,
+     "rope_scaling": {
+       "factor": 8.0,
+       "rope_type": "linear"
+     },
+     "rope_theta": 1000000.0,
+     "sliding_window": 1024,
+     "sliding_window_pattern": 6,
+     "torch_dtype": "bfloat16",
+     "use_cache": true,
+     "vocab_size": 262208
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3",
+   "vision_config": {
+     "attention_dropout": 0.0,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "image_size": 896,
+     "intermediate_size": 4304,
+     "layer_norm_eps": 1e-06,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 27,
+     "patch_size": 14,
+     "torch_dtype": "bfloat16",
+     "vision_use_head": false
+   }
+ }
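
Because the bitsandbytes quantization_config is embedded in config.json, a plain from_pretrained call restores the pre-quantized NF4 weights directly; nothing extra is required beyond having bitsandbytes and accelerate installed. A minimal loading sketch follows; the repo id is a placeholder (assumption), and the explicit BitsAndBytesConfig is included only to make the JSON-to-Python mapping visible.

import torch
from transformers import BitsAndBytesConfig, Gemma3ForConditionalGeneration

# Python equivalent of the embedded quantization_config above, shown for
# reference only; from_pretrained picks these settings up from config.json.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_storage=torch.bfloat16,
)

model = Gemma3ForConditionalGeneration.from_pretrained(
    "user/gemma-3-bnb-4bit",  # placeholder repo id (assumption)
    device_map="auto",        # requires accelerate + bitsandbytes
)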
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "do_sample": true,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "pad_token_id": 0,
+   "top_k": 64,
+   "top_p": 0.95,
+   "transformers_version": "4.51.3"
+ }
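
from_pretrained loads generation_config.json automatically, so nothing needs to be built by hand; the sketch below only illustrates the mapping from the file to a GenerationConfig object, with values copied verbatim (top-k 64 and top-p 0.95 match the upstream Gemma 3 sampling defaults).

from transformers import GenerationConfig

# In-code equivalent of generation_config.json above.
gen_config = GenerationConfig(
    bos_token_id=2,
    eos_token_id=[1, 106],
    pad_token_id=0,
    do_sample=True,
    top_k=64,
    top_p=0.95,
    cache_implementation="hybrid",
)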
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:600100f128f9e840f19bff5ad4e79bdd1881e2cb7eec29af857b7720a7ceb37f
+ size 3200212716
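
The weights are stored as a Git LFS pointer, so a downloaded model.safetensors can be checked against the oid and size above. A small verification sketch, assuming the file sits in the current directory:

import hashlib
import os

EXPECTED_OID = "600100f128f9e840f19bff5ad4e79bdd1881e2cb7eec29af857b7720a7ceb37f"
EXPECTED_SIZE = 3200212716  # bytes, from the pointer above

path = "model.safetensors"  # assumed local path
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")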