riczhou committed on
Commit 37605a7 · verified · 1 Parent(s): 24b244a

Upload folder using huggingface_hub

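The commit message indicates the files were pushed with huggingface_hub's upload_folder API. A minimal sketch of such an upload, assuming a hypothetical local folder and repo id (the actual values are not recorded in this diff):

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./dist/gemma3-q4bf16_0-MLC",        # hypothetical local folder with the converted model
    repo_id="your-username/gemma3-q4bf16_0-MLC",     # hypothetical destination repo on the Hub
    commit_message="Upload folder using huggingface_hub",
)

upload_folder sends large files through Git LFS, which is consistent with the .gitattributes change this commit introduces below.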
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<image_soft_token>": 262144
+}
mlc-chat-config.json ADDED
@@ -0,0 +1,99 @@
+{
+  "version": "0.1.0",
+  "model_type": "gemma3",
+  "quantization": "q4bf16_0",
+  "model_config": {
+    "text_config": {
+      "hidden_size": 2560,
+      "intermediate_size": 10240,
+      "num_hidden_layers": 34,
+      "attention_bias": false,
+      "num_attention_heads": 8,
+      "num_key_value_heads": 4,
+      "head_dim": 256,
+      "rms_norm_eps": 1e-06,
+      "hidden_activation": "gelu_pytorch_tanh",
+      "position_embedding_base": 10000,
+      "context_window_size": 8192,
+      "prefill_chunk_size": 8192,
+      "query_pre_attn_scalar": 256,
+      "sliding_window": 1024,
+      "kwargs": {
+        "model_type": "gemma3_text",
+        "rope_scaling": {
+          "factor": 8.0,
+          "rope_type": "linear"
+        }
+      }
+    },
+    "vocab_size": 262208,
+    "tensor_parallel_shards": 1,
+    "max_batch_size": 128,
+    "context_window_size": 8192,
+    "sliding_window_size": -1,
+    "prefill_chunk_size": 8192,
+    "is_text_model": false
+  },
+  "vocab_size": 262208,
+  "context_window_size": 8192,
+  "sliding_window_size": -1,
+  "prefill_chunk_size": 8192,
+  "attention_sink_size": -1,
+  "tensor_parallel_shards": 1,
+  "pipeline_parallel_stages": 1,
+  "temperature": 1.0,
+  "presence_penalty": 0.0,
+  "frequency_penalty": 0.0,
+  "repetition_penalty": 1.0,
+  "top_p": 1.0,
+  "tokenizer_files": [
+    "tokenizer.model",
+    "tokenizer.json",
+    "added_tokens.json",
+    "tokenizer_config.json"
+  ],
+  "tokenizer_info": {
+    "token_postproc_method": "byte_fallback",
+    "prepend_space_in_encode": false,
+    "strip_space_in_decode": false
+  },
+  "conv_template": {
+    "name": "gemma_instruction",
+    "system_template": "{system_message}",
+    "system_message": "",
+    "system_prefix_token_ids": [
+      2
+    ],
+    "add_role_after_system_message": true,
+    "roles": {
+      "user": "<start_of_turn>user",
+      "assistant": "<start_of_turn>model"
+    },
+    "role_templates": {
+      "user": "{user_message}",
+      "assistant": "{assistant_message}",
+      "tool": "{tool_message}"
+    },
+    "messages": [],
+    "seps": [
+      "<end_of_turn>\n"
+    ],
+    "role_content_sep": "\n",
+    "role_empty_sep": "\n",
+    "stop_str": [
+      "<end_of_turn>"
+    ],
+    "stop_token_ids": [
+      1,
+      107
+    ],
+    "function_string": "",
+    "use_function_calling": false
+  },
+  "pad_token_id": 0,
+  "bos_token_id": 2,
+  "eos_token_id": [
+    1,
+    106
+  ]
+}
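The conv_template block above fully determines how chat turns are serialized. A minimal sketch, derived only from those fields, of the prompt string MLC would build for a single user turn (MLC's own serializer is authoritative; this is for illustration):

# Assemble one user turn the way the gemma_instruction template describes.
user_message = "Hello!"

prompt = (
    # system_prefix_token_ids [2] means the BOS token is prepended as a
    # token id, not as text, so the textual prompt starts with the role.
    "<start_of_turn>user"      # roles["user"]
    + "\n"                     # role_content_sep
    + user_message             # role_templates["user"]
    + "<end_of_turn>\n"        # seps[0]
    + "<start_of_turn>model"   # roles["assistant"]
    + "\n"                     # role_empty_sep before the empty reply slot
)
# Generation stops on "<end_of_turn>" (stop_str) or token ids 1/107 (stop_token_ids).
print(prompt)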
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+size 33384568
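The three lines above are a Git LFS pointer, not the tokenizer itself; the ~33 MB payload lives in LFS storage, matching the filter rule added to .gitattributes in this commit. One way to resolve the pointer to the real file, assuming a hypothetical repo id:

from huggingface_hub import hf_hub_download

# Hypothetical repo id for illustration; hf_hub_download fetches the actual
# LFS-backed file and returns a local path to it.
local_path = hf_hub_download(
    repo_id="your-username/gemma3-q4bf16_0-MLC",
    filename="tokenizer.json",
)
print(local_path)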
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff