danielhanchen committed
Commit 0d6bd5e · verified · Parent: e21ba4e

Upload Gemma3ForCausalLM

Files changed (3)
  1. config.json +106 -0
  2. generation_config.json +14 -0
  3. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "_sliding_window_pattern": 6,
+   "architectures": [
+     "Gemma3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "attn_logit_softcapping": null,
+   "bos_token_id": 2,
+   "eos_token_id": 106,
+   "final_logit_softcapping": null,
+   "head_dim": 256,
+   "hidden_activation": "gelu_pytorch_tanh",
+   "hidden_size": 640,
+   "initializer_range": 0.02,
+   "intermediate_size": 2048,
+   "layer_types": [
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 32768,
+   "model_type": "gemma3_text",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 18,
+   "num_key_value_heads": 1,
+   "pad_token_id": 0,
+   "quantization_config": {
+     "include_input_output_embeddings": false,
+     "modules_to_not_convert": null,
+     "quant_method": "torchao",
+     "quant_type": {
+       "default": {
+         "_data": {
+           "activation_dtype": {
+             "_data": "float8_e4m3fn",
+             "_type": "torch.dtype"
+           },
+           "activation_value_lb": null,
+           "activation_value_ub": null,
+           "granularity": [
+             {
+               "_data": {},
+               "_type": "PerRow",
+               "_version": 1
+             },
+             {
+               "_data": {},
+               "_type": "PerRow",
+               "_version": 1
+             }
+           ],
+           "kernel_preference": {
+             "_data": "AUTO",
+             "_type": "KernelPreference"
+           },
+           "mm_config": {
+             "_data": {
+               "emulate": false,
+               "pad_inner_dim": false,
+               "use_fast_accum": true
+             },
+             "_type": "Float8MMConfig",
+             "_version": 1
+           },
+           "set_inductor_config": true,
+           "weight_dtype": {
+             "_data": "float8_e4m3fn",
+             "_type": "torch.dtype"
+           }
+         },
+         "_type": "Float8DynamicActivationFloat8WeightConfig",
+         "_version": 2
+       }
+     },
+     "quant_type_kwargs": {},
+     "untie_embedding_weights": false
+   },
+   "query_pre_attn_scalar": 256,
+   "rms_norm_eps": 1e-06,
+   "rope_local_base_freq": 10000.0,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": 512,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.55.2",
+   "unsloth_fixed": true,
+   "use_bidirectional_attention": false,
+   "use_cache": true,
+   "vocab_size": 262144
+ }
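
The "quantization_config" block above records a torchao float8 scheme (Float8DynamicActivationFloat8WeightConfig: float8_e4m3fn weights and dynamically quantized float8_e4m3fn activations, per-row granularity). A minimal loading sketch, assuming torchao is installed and a CUDA GPU with float8 kernel support is available; the repo id below is a placeholder, not taken from this commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/your-gemma3-float8-repo"  # placeholder: substitute the actual repository

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# transformers reads "quantization_config" from config.json and dispatches to
# torchao automatically, so no extra quantization arguments are needed here.
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",           # float8 matmul kernels run on GPU
)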
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "do_sample": true,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "max_length": 32768,
+   "pad_token_id": 0,
+   "top_k": 64,
+   "top_p": 0.95,
+   "transformers_version": "4.55.2"
+ }
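
Because do_sample, top_k, and top_p are stored in generation_config.json, model.generate() picks them up as defaults. A short sketch continuing from the loading snippet above; the prompt is arbitrary and the sampling arguments are repeated only to make the defaults visible:

inputs = tokenizer("Why is the sky blue?", return_tensors="pt").to(model.device)
# do_sample=True, top_k=64, top_p=0.95 would also be applied implicitly
# from generation_config.json if omitted here.
output = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_k=64, top_p=0.95)
print(tokenizer.decode(output[0], skip_special_tokens=True))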
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5835ae44499a2e54a4f2d5aa235d05f7b0f9ce16ae1ea4f71de62e754e1600d3
+ size 436562955
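
The weight file itself lives in Git LFS; the pointer above carries only its sha256 and byte size. A standard-library sketch for verifying a downloaded copy against the pointer:

import hashlib
import os

path = "pytorch_model.bin"
assert os.path.getsize(path) == 436562955  # size recorded in the LFS pointer

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == "5835ae44499a2e54a4f2d5aa235d05f7b0f9ce16ae1ea4f71de62e754e1600d3"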