Upload folder using huggingface_hub
Files changed:
- .gitattributes (+14, -11)
- README.md (+51, -0)
- config.json (+10, -0)
- llm.mnn (+3, -0)
- llm.mnn.weight (+3, -0)
- llm_config.json (+31, -0)
- tokenizer.txt (+0, -0)
.gitattributes
CHANGED
@@ -1,35 +1,38 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.
-
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*.tfevents* filter=lfs diff=lfs merge=lfs -text
+*.db* filter=lfs diff=lfs merge=lfs -text
+*.ark* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.mnn filter=lfs diff=lfs merge=lfs -text
+*.mnn.* filter=lfs diff=lfs merge=lfs -text
+*.weight filter=lfs diff=lfs merge=lfs -text
+
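The net effect of this change is that the MNN artifacts in this upload (`*.mnn`, `*.mnn.*`, `*.weight`, plus checkpoint-style files) are stored through Git LFS rather than as plain blobs. As an illustration only (not part of the commit), the sketch below checks which uploaded files would match the newly added patterns; it uses plain `fnmatch` globbing, which approximates but does not exactly reproduce Git's wildmatch rules (e.g. `**/` handling differs).

```python
from fnmatch import fnmatch

# Patterns added to .gitattributes in this commit (LFS-tracked).
new_lfs_patterns = ["*.mnn", "*.mnn.*", "*.weight", "*.safetensors", "*.ckpt"]

# Files shipped in this upload.
repo_files = ["config.json", "llm.mnn", "llm.mnn.weight", "llm_config.json", "tokenizer.txt"]

for path in repo_files:
    tracked = any(fnmatch(path, pattern) for pattern in new_lfs_patterns)
    print(f"{path}: {'LFS' if tracked else 'regular git object'}")
```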
README.md
ADDED
@@ -0,0 +1,51 @@
+---
+license: apache-2.0
+language:
+- en
+pipeline_tag: text-generation
+tags:
+- chat
+---
+# gemma-3-270m-it-MNN
+
+## Introduction
+This model is a 4-bit quantized MNN export of gemma-3-270m-it, produced with [llmexport](https://github.com/alibaba/MNN/tree/master/transformers/llm/export).
+
+## Download
+```bash
+# install the Hugging Face Hub client
+pip install huggingface_hub
+```
+```bash
+# CLI download
+huggingface-cli download 'taobao-mnn/gemma-3-270m-it-MNN' --local-dir 'path/to/dir'
+```
+```python
+# SDK download
+from huggingface_hub import snapshot_download
+model_dir = snapshot_download('taobao-mnn/gemma-3-270m-it-MNN')
+```
+
+```bash
+# git clone
+git clone https://www.modelscope.cn/taobao-mnn/gemma-3-270m-it-MNN
+```
+
+## Usage
+```bash
+# clone the MNN source
+git clone https://github.com/alibaba/MNN.git
+
+# compile
+cd MNN
+mkdir build && cd build
+cmake .. -DMNN_LOW_MEMORY=true -DMNN_CPU_WEIGHT_DEQUANT_GEMM=true -DMNN_BUILD_LLM=true -DMNN_SUPPORT_TRANSFORMER_FUSE=true
+make -j
+
+# run
+./llm_demo /path/to/gemma-3-270m-it-MNN/config.json prompt.txt
+```
+
+## Document
+[MNN-LLM](https://mnn-docs.readthedocs.io/en/latest/transformers/llm.html#)
+
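For readers who prefer scripting the README's download-and-run flow end to end, here is a minimal sketch that combines the SDK download above with the `llm_demo` binary produced by the build step. The locations of `llm_demo` and `prompt.txt` are assumptions based on the README's build instructions, not verified paths.

```python
import subprocess
from pathlib import Path

from huggingface_hub import snapshot_download

# Fetch the MNN export from the Hub (cached locally by huggingface_hub).
model_dir = Path(snapshot_download("taobao-mnn/gemma-3-270m-it-MNN"))

# Assumes the MNN build from the README was run in ./MNN/build.
llm_demo = Path("MNN/build/llm_demo")
prompt_file = Path("prompt.txt")  # prompt file expected by llm_demo

subprocess.run(
    [str(llm_demo), str(model_dir / "config.json"), str(prompt_file)],
    check=True,
)
```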
config.json
ADDED
@@ -0,0 +1,10 @@
+{
+    "llm_model": "llm.mnn",
+    "llm_weight": "llm.mnn.weight",
+    "backend_type": "cpu",
+    "thread_num": 4,
+    "precision": "low",
+    "memory": "low",
+    "sampler_type": "penalty",
+    "penalty": 1.1
+}
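config.json is the runtime configuration that llm_demo (and other MNN-LLM front ends) read: it points at the graph (llm.mnn) and the quantized weights (llm.mnn.weight) and sets the backend, thread count, precision/memory modes, and sampler. A small sketch for adjusting these knobs before a run; the field names come from the file above, while the path and the specific values written are only examples.

```python
import json
from pathlib import Path

cfg_path = Path("gemma-3-270m-it-MNN/config.json")  # path is an assumption
cfg = json.loads(cfg_path.read_text())

# Example tweaks; the exact values here are illustrative, not recommendations.
cfg["thread_num"] = 8
cfg["precision"] = "normal"

cfg_path.write_text(json.dumps(cfg, indent=4))
print(json.dumps(cfg, indent=4))
```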
llm.mnn
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:584b72890da15f85f5aedc81d7a202ef7b402c8e78f032031a382dc2c4a55190
+size 347904
llm.mnn.weight
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:805d281786a42c4c1ab8d79709ca8614c4e9fdd6145af9fabebea9ad30021638
+size 302028286
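llm.mnn and llm.mnn.weight are committed as Git LFS pointer files; the oid and size fields above describe the real payloads that a download resolves to. A minimal sketch for sanity-checking the downloaded payloads against those pointers, assuming both files sit in the current directory and copying the expected digests from the pointers above:

```python
import hashlib
from pathlib import Path

# sha256 digests and byte sizes copied from the LFS pointer files above.
expected = {
    "llm.mnn": ("584b72890da15f85f5aedc81d7a202ef7b402c8e78f032031a382dc2c4a55190", 347904),
    "llm.mnn.weight": ("805d281786a42c4c1ab8d79709ca8614c4e9fdd6145af9fabebea9ad30021638", 302028286),
}

for name, (oid, size) in expected.items():
    path = Path(name)
    digest = hashlib.sha256()
    with path.open("rb") as f:
        # Hash in 1 MiB chunks so the 300 MB weight file is not read into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    ok = digest.hexdigest() == oid and path.stat().st_size == size
    print(f"{name}: {'OK' if ok else 'MISMATCH'}")
```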
llm_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+    "hidden_size": 640,
+    "layer_nums": 18,
+    "attention_mask": "float",
+    "key_value_shape": [
+        2,
+        1,
+        0,
+        1,
+        256
+    ],
+    "bos": "<bos><start_of_turn>user\n",
+    "system_prompt_template": "%s\n\n",
+    "user_prompt_template": "%s<end_of_turn>\n",
+    "assistant_prompt_template": "<start_of_turn>model\n%s<end_of_turn>\n<start_of_turn>user\n",
+    "is_visual": false,
+    "attention_type": "mix",
+    "sliding_window": 512,
+    "jinja": {
+        "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
+        "bos": "<bos>",
+        "eos": "<eos>"
+    },
+    "tie_embeddings": [
+        113284606,
+        281056766,
+        20971520,
+        8,
+        64
+    ]
+}
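llm_config.json carries both the legacy %s-style prompt templates and a full Jinja chat template under "jinja". To inspect the exact prompt string the Gemma-3 turn format produces, the template can be rendered offline with jinja2. This is an illustration only, not how the MNN runtime applies the template; the file path is assumed, and raise_exception is a stand-in for the helper that transformers normally injects when rendering chat templates.

```python
import json
from jinja2 import Environment

with open("llm_config.json") as f:  # path is an assumption
    cfg = json.load(f)

env = Environment()

def raise_exception(message):  # stand-in for the helper the template expects
    raise ValueError(message)

env.globals["raise_exception"] = raise_exception
template = env.from_string(cfg["jinja"]["chat_template"])

messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Hello!"},
]
print(template.render(messages=messages,
                      bos_token=cfg["jinja"]["bos"],
                      add_generation_prompt=True))
# Expected shape: <bos><start_of_turn>user\n...<end_of_turn>\n<start_of_turn>model\n
```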
tokenizer.txt
ADDED
The diff for this file is too large to render.