jackyoung96 committed c0e88ae (verified) · 1 parent: 471fa1b

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/A.X_logo_ko.jpg filter=lfs diff=lfs merge=lfs -text
+ A.X_logo_ko.png filter=lfs diff=lfs merge=lfs -text
+ assets/A.X_logo_ko.png filter=lfs diff=lfs merge=lfs -text
+ assets/A.X_logo_ko_4x3.png filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,233 @@
Copyright (c) 2025 SK Telecom Co., Ltd. All rights reserved.

Built with Qwen 2.5 — original model by Alibaba Cloud, licensed under the Apache License 2.0.

Unless otherwise stated, all files in this repository (including modified model weights
and tokenizer files) are distributed under the terms of the Apache License, Version 2.0
(the "License"). You may obtain a copy of the License at:

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific language governing
permissions and limitations under the License.

================================================================================
NOTICE (Apache-2.0 §4(d))
================================================================================

This product is built with Qwen 2.5 developed by Alibaba Cloud
(https://github.com/QwenLM) and distributed under the Apache License 2.0.

See the upstream Qwen 2.5 repository for additional attribution details.

================================================================================
TRADEMARK
================================================================================

"SK Telecom" and associated logos are trademarks of SK Telecom Co., Ltd.
This License does not grant permission to use these trademarks without prior
written consent.

================================================================================
APACHE LICENSE 2.0
================================================================================

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2024 Alibaba Cloud
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
README.md CHANGED
@@ -1,3 +1,512 @@
- ---
- license: apache-2.0
- ---

---
license: apache-2.0
license_link: https://huggingface.co/skt/A.X-4.0-Light/blob/main/LICENSE
language:
- en
- ko
pipeline_tag: text-generation
library_name: transformers
model_id: skt/A.X-4.0-Light
developers: SKT AI Model Lab
model-index:
- name: A.X-4.0-Light
  results:
  - task:
      type: generate_until
      name: mmlu
    dataset:
      name: mmlu (chat CoT)
      type: hails/mmlu_no_train
    metrics:
    - type: exact_match
      value: 75.43
      name: exact_match
  - task:
      type: generate_until
      name: kmmlu
    dataset:
      name: kmmlu (chat CoT)
      type: HAERAE-HUB/KMMLU
    metrics:
    - type: exact_match
      value: 64.15
      name: exact_match
---

# A.X 4.0 Light

<p align="center">
<picture>
<img src="./assets/A.X_logo_ko_4x3.png" width="45%" style="margin: 40px auto;">
</picture>
</p>
<p align="center"> <a href="https://huggingface.co/collections/skt/ax-4-68637ebaa63b9cc51925e886">🤗 Models</a> | <a href="https://sktax.chat/chat">💬 Chat</a> | <a href="https://github.com/SKT-AI/A.X-4.0">🖥️ GitHub</a> </p>

## A.X 4.0 Family Highlights

SK Telecom released **A.X 4.0** (pronounced "A dot X"), a large language model (LLM) optimized for Korean-language understanding and enterprise deployment, on July 3, 2025. Built on the open-source [Qwen2.5](https://huggingface.co/collections/Qwen/qwen25-66e81a666513e518adb90d9e) model, A.X 4.0 has been further trained on large-scale Korean datasets to deliver outstanding performance in real-world business environments.

- **Superior Korean Proficiency**: Achieved a score of 78.3 on [KMMLU](https://huggingface.co/datasets/HAERAE-HUB/KMMLU), the leading benchmark for Korean-language evaluation and a Korean-specific adaptation of MMLU, outperforming GPT-4o (72.5).
- **Deep Cultural Understanding**: Scored 83.5 on [CLIcK](https://huggingface.co/datasets/EunsuKim/CLIcK), a benchmark for Korean cultural and contextual comprehension, surpassing GPT-4o (80.2).
- **Efficient Token Usage**: A.X 4.0 uses approximately 33% fewer tokens than GPT-4o for the same Korean input, enabling more cost-effective and efficient processing (see the tokenizer sketch after this list).
- **Deployment Flexibility**: Offered in both a 72B-parameter standard model (A.X 4.0) and a 7B lightweight version (A.X 4.0 Light).
- **Long Context Handling**: Supports up to 131,072 tokens, allowing comprehension of lengthy documents and conversations (the lightweight model supports up to 16,384 tokens).

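As a rough illustration of the token-efficiency claim above, the two tokenizers can be compared directly. This is a minimal sketch, assuming `transformers` and `tiktoken` are installed; `o200k_base` is the public GPT-4o encoding, and this only shows how such a comparison can be run, not the measurement protocol behind the 33% figure:

```python
from transformers import AutoTokenizer
import tiktoken

ax_tokenizer = AutoTokenizer.from_pretrained("skt/A.X-4.0-Light")
gpt4o_encoding = tiktoken.get_encoding("o200k_base")  # GPT-4o's encoding

# An arbitrary Korean sentence; longer corpora give more stable ratios.
text = "에어컨 여름철 적정 온도는 일반적으로 24도에서 26도 사이입니다."
ax_count = len(ax_tokenizer(text)["input_ids"])
gpt4o_count = len(gpt4o_encoding.encode(text))
print(f"A.X 4.0 Light: {ax_count} tokens / GPT-4o: {gpt4o_count} tokens")
```
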
## Performance

### Model Performance

<table><thead>
<tr>
<th colspan="2">Benchmarks</th>
<th>A.X 4.0</th>
<th>Qwen3-235B-A22B<br/>(w/o reasoning)</th>
<th>Qwen2.5-72B</th>
<th>GPT-4o</th>
</tr></thead>
<tbody>
<tr>
<td rowspan="4">Knowledge</td>
<td>KMMLU</td>
<td>78.32</td>
<td>73.64</td>
<td>66.44</td>
<td>72.51</td>
</tr>
<tr>
<td>CLIcK</td>
<td>83.51</td>
<td>74.55</td>
<td>72.59</td>
<td>80.22</td>
</tr>
<tr>
<td>KoBALT</td>
<td>47.30</td>
<td>41.57</td>
<td>37.00</td>
<td>44.00</td>
</tr>
<tr>
<td>MMLU</td>
<td>86.62</td>
<td>87.37</td>
<td>85.70</td>
<td>88.70</td>
</tr>
<tr>
<td rowspan="3">General</td>
<td>Ko-MT-Bench</td>
<td>86.69</td>
<td>88.00</td>
<td>82.69</td>
<td>88.44</td>
</tr>
<tr>
<td>MT-Bench</td>
<td>83.25</td>
<td>86.56</td>
<td>93.50</td>
<td>88.19</td>
</tr>
<tr>
<td>LiveBench<sup>2024.11</sup></td>
<td>52.30</td>
<td>64.50</td>
<td>54.20</td>
<td>52.19</td>
</tr>
<tr>
<td rowspan="2">Instruction Following</td>
<td>Ko-IFEval</td>
<td>77.96</td>
<td>77.53</td>
<td>77.07</td>
<td>75.38</td>
</tr>
<tr>
<td>IFEval</td>
<td>86.05</td>
<td>85.77</td>
<td>86.54</td>
<td>83.86</td>
</tr>
<tr>
<td rowspan="2">Math</td>
<td>HRM8K</td>
<td>48.55</td>
<td>54.52</td>
<td>46.37</td>
<td>43.27</td>
</tr>
<tr>
<td>MATH</td>
<td>74.28</td>
<td>72.72</td>
<td>77.00</td>
<td>72.38</td>
</tr>
<tr>
<td rowspan="3">Code</td>
<td>HumanEval+</td>
<td>79.27</td>
<td>79.27</td>
<td>81.71</td>
<td>86.00</td>
</tr>
<tr>
<td>MBPP+</td>
<td>73.28</td>
<td>70.11</td>
<td>75.66</td>
<td>75.10</td>
</tr>
<tr>
<td>LiveCodeBench<sup>2024.10~2025.04</sup></td>
<td>26.07</td>
<td>33.09</td>
<td>27.58</td>
<td>29.30</td>
</tr>
<tr>
<td>Long Context</td>
<td>LongBench<sup>&lt;128K</sup></td>
<td>56.70</td>
<td>49.40</td>
<td>45.60</td>
<td>47.50</td>
</tr>
<tr>
<td>Tool-use</td>
<td>FunctionChatBench</td>
<td>85.96</td>
<td>82.43</td>
<td>88.30</td>
<td>95.70</td>
</tr>
</tbody></table>

### Lightweight Model Performance

<table><thead>
<tr>
<th colspan="2">Benchmarks</th>
<th>A.X 4.0 Light</th>
<th>Qwen3-8B<br/>(w/o reasoning)</th>
<th>Qwen2.5-7B</th>
<th>EXAONE-3.5-7.8B</th>
<th>Kanana-1.5-8B</th>
</tr></thead>
<tbody>
<tr>
<td rowspan="4">Knowledge</td>
<td>KMMLU</td>
<td>64.15</td>
<td>63.53</td>
<td>49.56</td>
<td>53.76</td>
<td>48.28</td>
</tr>
<tr>
<td>CLIcK</td>
<td>68.05</td>
<td>62.71</td>
<td>60.56</td>
<td>64.30</td>
<td>61.30</td>
</tr>
<tr>
<td>KoBALT</td>
<td>30.29</td>
<td>26.57</td>
<td>21.57</td>
<td>21.71</td>
<td>23.14</td>
</tr>
<tr>
<td>MMLU</td>
<td>75.43</td>
<td>82.89</td>
<td>75.40</td>
<td>72.20</td>
<td>68.82</td>
</tr>
<tr>
<td rowspan="3">General</td>
<td>Ko-MT-Bench</td>
<td>79.50</td>
<td>64.06</td>
<td>61.31</td>
<td>81.06</td>
<td>76.30</td>
</tr>
<tr>
<td>MT-Bench</td>
<td>81.56</td>
<td>65.69</td>
<td>79.37</td>
<td>83.50</td>
<td>77.60</td>
</tr>
<tr>
<td>LiveBench</td>
<td>37.10</td>
<td>50.20</td>
<td>37.00</td>
<td>40.20</td>
<td>29.40</td>
</tr>
<tr>
<td rowspan="2">Instruction Following</td>
<td>Ko-IFEval</td>
<td>72.99</td>
<td>73.39</td>
<td>60.73</td>
<td>65.01</td>
<td>69.96</td>
</tr>
<tr>
<td>IFEval</td>
<td>84.68</td>
<td>85.38</td>
<td>76.73</td>
<td>82.61</td>
<td>80.11</td>
</tr>
<tr>
<td rowspan="2">Math</td>
<td>HRM8K</td>
<td>40.12</td>
<td>52.50</td>
<td>35.13</td>
<td>31.88</td>
<td>30.87</td>
</tr>
<tr>
<td>MATH</td>
<td>68.88</td>
<td>71.48</td>
<td>65.58</td>
<td>63.20</td>
<td>59.28</td>
</tr>
<tr>
<td rowspan="3">Code</td>
<td>HumanEval+</td>
<td>75.61</td>
<td>77.44</td>
<td>74.39</td>
<td>76.83</td>
<td>76.83</td>
</tr>
<tr>
<td>MBPP+</td>
<td>67.20</td>
<td>62.17</td>
<td>68.50</td>
<td>64.29</td>
<td>67.99</td>
</tr>
<tr>
<td>LiveCodeBench</td>
<td>18.03</td>
<td>23.93</td>
<td>16.62</td>
<td>17.98</td>
<td>16.52</td>
</tr>
</tbody></table>

## 🚀 Quickstart

### with HuggingFace Transformers

- `transformers>=4.46.0` (or the latest version) is required to use `skt/A.X-4.0-Light`
```bash
pip install "transformers>=4.46.0"
```

#### Example Usage

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "skt/A.X-4.0-Light"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model.eval()
tokenizer = AutoTokenizer.from_pretrained(model_name)

messages = [
    # System prompt: "You are an AI expert who translates the user's English sentences into Korean."
    {"role": "system", "content": "당신은 사용자가 제공하는 영어 문장들을 한국어로 번역하는 AI 전문가입니다."},
    {"role": "user", "content": "The first human went into space and orbited the Earth on April 12, 1961."},
]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)

with torch.no_grad():
    output = model.generate(
        input_ids,
        max_new_tokens=128,
        do_sample=False,
    )

len_input_prompt = len(input_ids[0])
response = tokenizer.decode(output[0][len_input_prompt:], skip_special_tokens=True)
print(response)
# Output:
# 1961년 4월 12일, 최초의 인간이 우주로 나가 지구를 공전했습니다.
```

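For interactive use, the same generation call can stream tokens as they are produced. A minimal sketch, reusing the `model`, `tokenizer`, and `input_ids` objects from the example above, with `transformers`' built-in `TextStreamer`:

```python
from transformers import TextStreamer

# Prints decoded tokens to stdout as they are generated,
# skipping the prompt and special tokens.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

with torch.no_grad():
    model.generate(
        input_ids,
        max_new_tokens=128,
        do_sample=False,
        streamer=streamer,
    )
```
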
### with vLLM

- `vllm>=0.6.4.post1` (or the latest version) is required to use the tool-use function
```bash
pip install "vllm>=0.6.4.post1"
# if you don't want to enable the tool-use function, comment out the vLLM options below
VLLM_OPTION="--enable-auto-tool-choice --tool-call-parser hermes"
vllm serve skt/A.X-4.0-Light $VLLM_OPTION
```

#### Example Usage

```python
from openai import OpenAI

def call(messages, model):
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
    )
    print(completion.choices[0].message)

client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="api_key"
)
model = "skt/A.X-4.0-Light"

# "What is the appropriate air-conditioner temperature in summer? Answer in one line."
messages = [{"role": "user", "content": "에어컨 여름철 적정 온도는? 한줄로 답변해줘"}]
call(messages, model)
# Output:
# ChatCompletionMessage(content='여름철 적정 에어컨 온도는 일반적으로 24-26도입니다.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], reasoning_content=None)

messages = [{"role": "user", "content": "What is the appropriate temperature for air conditioning in summer? Response in a single sentence."}]
call(messages, model)
# Output:
# ChatCompletionMessage(content='The appropriate temperature for air conditioning in summer generally ranges from 72°F to 78°F (22°C to 26°C) for comfort and energy efficiency.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], reasoning_content=None)
```

#### Examples for tool-use

```python
from openai import OpenAI


def call(messages, model):
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        tools=tools
    )
    print(completion.choices[0].message)


client = OpenAI(
    base_url="http://ip:port/v1",
    api_key="api_key"
)
model = "skt/A.X-4.0-Light"

# Tool schema: computes a discounted price from an original price and a percentage discount.
calculate_discount = {
    "type": "function",
    "function": {
        "name": "calculate_discount",
        "description": "원가격과 할인율(퍼센트 단위)을 입력받아 할인된 가격을 계산한다.",
        "parameters": {
            "type": "object",
            "properties": {
                "original_price": {
                    "type": "number",
                    "description": "상품의 원래 가격"
                },
                "discount_percentage": {
                    "type": "number",
                    "description": "적용할 할인율(예: 20% 할인의 경우 20을 입력)"
                }
            },
            "required": ["original_price", "discount_percentage"]
        }
    }
}
# Tool schema: fetches the exchange rate between two currencies.
get_exchange_rate = {
    "type": "function",
    "function": {
        "name": "get_exchange_rate",
        "description": "두 통화 간의 환율을 가져온다.",
        "parameters": {
            "type": "object",
            "properties": {
                "base_currency": {
                    "type": "string",
                    "description": "The currency to convert from."
                },
                "target_currency": {
                    "type": "string",
                    "description": "The currency to convert to."
                }
            },
            "required": ["base_currency", "target_currency"]
        }
    }
}
tools = [calculate_discount, get_exchange_rate]

### Slot filling ###
# User: "We need to buy something that's originally 57,600 won, and I can get an employee discount. Calculate the discounted price."
messages = [{"role": "user", "content": "우리가 뭘 사야되는데 원래 57600원인데 직원할인 받을 수 있거든? 할인가좀 계산해줘"}]
call(messages, model)
# Output (the model asks back for the missing discount rate):
# ChatCompletionMessage(content='할인율을 알려주시겠습니까?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], reasoning_content=None)


### Function calling ###
messages = [
    {"role": "user", "content": "우리가 뭘 사야되는데 원래 57600원인데 직원할인 받을 수 있거든? 할인가좀 계산해줘"},
    {"role": "assistant", "content": "할인율을 알려주시겠습니까?"},
    {"role": "user", "content": "15% 할인 받을 수 있어."},
]
call(messages, model)
# Output:
# ChatCompletionMessage(content=None, refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='chatcmpl-tool-7778d1d9fca94bf2acbb44c79359502c', function=Function(arguments='{"original_price": 57600, "discount_percentage": 15}', name='calculate_discount'), type='function')], reasoning_content=None)


### Completion ###
messages = [
    {"role": "user", "content": "우리가 뭘 사야되는데 원래 57600원인데 직원할인 받을 수 있거든? 할인가좀 계산해줘"},
    {"role": "assistant", "content": "할인율을 알려주시겠습니까?"},
    {"role": "user", "content": "15% 할인 받을 수 있어."},
    {"role": "tool", "tool_call_id": "random_id", "name": "calculate_discount", "content": "{\"original_price\": 57600, \"discount_percentage\": 15, \"discounted_price\": 48960.0}"}
]
call(messages, model)
# Output:
# ChatCompletionMessage(content='57600원의 상품에서 15% 할인을 적용하면, 할인된 가격은 48960원입니다.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], reasoning_content=None)
```
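
The examples above elide the step between the function call and the completion: actually executing the tool and sending its result back. Below is a minimal sketch of that round trip, reusing `client`, `model`, `tools`, and `call` from above; the local `calculate_discount` function is a hypothetical implementation of the declared tool:

```python
import json

def calculate_discount(original_price, discount_percentage):
    # Hypothetical local implementation of the tool declared above.
    return {
        "original_price": original_price,
        "discount_percentage": discount_percentage,
        "discounted_price": original_price * (1 - discount_percentage / 100),
    }

messages = [
    {"role": "user", "content": "우리가 뭘 사야되는데 원래 57600원인데 직원할인 받을 수 있거든? 할인가좀 계산해줘"},
    {"role": "assistant", "content": "할인율을 알려주시겠습니까?"},
    {"role": "user", "content": "15% 할인 받을 수 있어."},
]
completion = client.chat.completions.create(model=model, messages=messages, tools=tools)
assistant_msg = completion.choices[0].message
tool_call = assistant_msg.tool_calls[0]

# Run the requested tool locally and append its result as a "tool" message.
result = calculate_discount(**json.loads(tool_call.function.arguments))
messages.append(assistant_msg)  # keep the tool call in the conversation history
messages.append({
    "role": "tool",
    "tool_call_id": tool_call.id,
    "name": tool_call.function.name,
    "content": json.dumps(result),
})
call(messages, model)
# Expected: a natural-language answer quoting the 48960-won discounted price,
# as in the Completion example above.
```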

## License

The `A.X 4.0 Light` models are licensed under `Apache License 2.0`.

## Citation

```
@article{SKTAdotX4Light,
  title={A.X 4.0 Light},
  author={SKT AI Model Lab},
  year={2025},
  url={https://huggingface.co/skt/A.X-4.0-Light}
}
```

## Contact

- Business & Partnership Contact: [[email protected]]([email protected])
assets/A.X_logo.png ADDED
assets/A.X_logo_ko_4x3.png ADDED

Git LFS Details

  • SHA256: 45eecde59e22d7be0f8a451c2cd7bb7f1526e0bb5b021ed202ce12c0838fc53d
  • Pointer size: 131 Bytes
  • Size of remote file: 183 kB
config.json ADDED
@@ -0,0 +1,28 @@
{
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 16384,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 1000000,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.3",
  "use_cache": false,
  "use_sliding_window": false,
  "vocab_size": 102400
}
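
These fields pin down the lightweight model's shape: a 28-layer Qwen2 architecture with a 16,384-token context window (`max_position_embeddings`) and a 102,400-entry vocabulary. A minimal sketch, assuming `transformers` is installed, of reading these values programmatically:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("skt/A.X-4.0-Light")
print(config.model_type)               # qwen2
print(config.max_position_embeddings)  # 16384, the 16K context limit noted above
print(config.num_hidden_layers)        # 28
print(config.vocab_size)               # 102400
```
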
generation_config.json ADDED
@@ -0,0 +1,7 @@
{
  "bos_token_id": 0,
  "eos_token_id": 27,
  "max_new_tokens": 16384,
  "pad_token_id": 1,
  "transformers_version": "4.51.3"
}
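
Generation stops on token id 27 here, which differs from the `eos_token_id` of 0 in `config.json` above; `generate` picks these defaults up automatically when the model is loaded. A minimal sketch, assuming `transformers` is installed, of inspecting them:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("skt/A.X-4.0-Light")
print(gen_config.eos_token_id)    # 27, used to stop generation (config.json lists 0)
print(gen_config.max_new_tokens)  # 16384
print(gen_config.pad_token_id)    # 1
```
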
merges.txt ADDED
The diff for this file is too large to render.
 
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f80502d7d801be45497f1391d82f0ff2e4afb7f0c26cbe988fcd3455d768655
size 4987786168
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1725192e45669b4b6ee825aaa79832a82b2f92e9dac18a68a7f69d0bcfa5feec
size 4932751024
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfdeb1f54032d8b6a34b235a4f9f2a67325b1a3dfc26f7d271964ae1b1950822
size 4598751568
model.safetensors.index.json ADDED
@@ -0,0 +1,346 @@
{
  "metadata": {
    "total_size": 14519249920
  },
  "weight_map": {
    "lm_head.weight": "model-00003-of-00003.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
337
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
338
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
339
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
340
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
341
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
342
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
343
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
344
+ "model.norm.weight": "model-00003-of-00003.safetensors"
345
+ }
346
+ }
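
The weight_map above assigns every tensor name to one of the three safetensors shards, so a loader only has to open the shard that actually holds a given weight. Below is a minimal sketch of resolving one tensor by hand, assuming the commands run inside a local clone of this repo (transformers' from_pretrained performs the same lookup automatically):

import json
from safetensors import safe_open

# model.safetensors.index.json maps each tensor name to its shard file.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.27.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # "model-00003-of-00003.safetensors"

# Open only that shard and read the single tensor lazily.
with safe_open(shard, framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))
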
special_tokens_map.json ADDED
@@ -0,0 +1,88 @@
+ {
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<|pad|>",
+ "<|unk|>",
+ "<|sep|>",
+ "<|mask|>",
+ "<|cls|>",
+ "<|image|>",
+ "<|audio|>",
+ "<|user|>",
+ "<|system|>",
+ "<|assistant|>",
+ "<|extra_id_0|>",
+ "<|extra_id_1|>",
+ "<|extra_id_2|>",
+ "<|extra_id_3|>",
+ "<|extra_id_4|>",
+ "<|extra_id_5|>",
+ "<|extra_id_6|>",
+ "<|extra_id_7|>",
+ "<|extra_id_8|>",
+ "<|extra_id_9|>",
+ "<|extra_id_10|>",
+ "<|extra_id_13|>",
+ "<|im_start|>",
+ "<|im_sep|>",
+ "<|im_end|>",
+ "<|resident_reg|>",
+ "<|foreigner_reg|>",
+ "<|business_reg|>",
+ "<|credit_card|>",
+ "<|passport|>",
+ "<|driver_license|>",
+ "<|telephone|>",
+ "<|health_insurance|>",
+ "<|bank_account|>"
+ ],
+ "bos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "<|cls|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "<|mask|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "<|sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<|unk|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
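
special_tokens_map.json pins the standard roles (bos/eos/pad/sep/cls/mask/unk) to this repo's ChatML-style tokens and registers the extra placeholders — including what appear to be PII-masking tokens such as <|credit_card|> and <|passport|> — as additional special tokens. A quick sanity check after loading the tokenizer from a local clone of this repo (the "." path is illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # "." = local clone of this repo

print(tok.bos_token, tok.eos_token, tok.pad_token)  # <|endoftext|> <|im_end|> <|pad|>
# Additional special tokens are atomic: each maps to a single id and is
# never split by the BPE merges.
print(tok.convert_tokens_to_ids("<|credit_card|>"))
print(tok.tokenize("<|im_start|><|user|>hello<|im_end|>"))
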
tokenizer_config.json ADDED
@@ -0,0 +1,389 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<|pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<|unk|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<|sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "<|mask|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "5": {
+ "content": "<|cls|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "6": {
+ "content": "<|image|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "7": {
+ "content": "<|audio|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "8": {
+ "content": "<|user|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "9": {
+ "content": "<|system|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "10": {
+ "content": "<|assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "11": {
+ "content": "<|extra_id_0|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "12": {
+ "content": "<|extra_id_1|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "13": {
+ "content": "<|extra_id_2|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "14": {
+ "content": "<|extra_id_3|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "15": {
+ "content": "<|extra_id_4|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "16": {
+ "content": "<|extra_id_5|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "17": {
+ "content": "<|extra_id_6|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "18": {
+ "content": "<|extra_id_7|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "19": {
+ "content": "<|extra_id_8|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "20": {
+ "content": "<|extra_id_9|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "21": {
+ "content": "<|extra_id_10|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "22": {
+ "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "23": {
+ "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "24": {
+ "content": "<|extra_id_13|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "25": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "26": {
+ "content": "<|im_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "27": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28": {
+ "content": "<|resident_reg|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "29": {
+ "content": "<|foreigner_reg|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "30": {
+ "content": "<|business_reg|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "31": {
+ "content": "<|credit_card|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32": {
+ "content": "<|passport|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "33": {
+ "content": "<|driver_license|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "34": {
+ "content": "<|telephone|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "35": {
+ "content": "<|health_insurance|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "36": {
+ "content": "<|bank_account|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "37": {
+ "content": "</tool_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "38": {
+ "content": "<tool_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "39": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "40": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<|pad|>",
+ "<|unk|>",
+ "<|sep|>",
+ "<|mask|>",
+ "<|cls|>",
+ "<|image|>",
+ "<|audio|>",
+ "<|user|>",
+ "<|system|>",
+ "<|assistant|>",
+ "<|extra_id_0|>",
+ "<|extra_id_1|>",
+ "<|extra_id_2|>",
+ "<|extra_id_3|>",
+ "<|extra_id_4|>",
+ "<|extra_id_5|>",
+ "<|extra_id_6|>",
+ "<|extra_id_7|>",
+ "<|extra_id_8|>",
+ "<|extra_id_9|>",
+ "<|extra_id_10|>",
+ "<|extra_id_13|>",
+ "<|im_start|>",
+ "<|im_sep|>",
+ "<|im_end|>",
+ "<|resident_reg|>",
+ "<|foreigner_reg|>",
+ "<|business_reg|>",
+ "<|credit_card|>",
+ "<|passport|>",
+ "<|driver_license|>",
+ "<|telephone|>",
+ "<|health_insurance|>",
+ "<|bank_account|>"
+ ],
+ "bos_token": "<|endoftext|>",
+ "chat_template": "{%- if tools is iterable and tools | length > 0 %}\n {{- '<|im_start|><|system|>'}}\n {{- '당신은 도구 호출 기능을 갖춘 유용한 도우미입니다. 사용자의 요청을 처리하기 위해서 필요한 도구가 주어진 목록에 있는 경우 도구 호출로 응답하세요.\n필요한 도구가 목록에 없는 경우에는 도구 호출 없이 사용자가 요구한 정보를 제공하세요.\n필요한 도구가 목록에 있지만 해당 도구를 호출하는데 필요한 argument 정보가 부족한 경우 해당 정보를 사용자에게 요청하세요.\n사용자의 요청을 처리하기 위해 여러번 도구를 호출할 수 있어야 합니다.\n도구 호출 이후 도구 실행 결과를 입력으로 받으면 해당 결과를 활용하여 답변을 생성하세요.\n\n다음은 접근할 수 있는 도구들의 목록 입니다:\n<tools>\n'}}\n {%- for t in tools %}\n {{- t | tojson }}\n {{- '\n' }}\n {%- endfor %}\n {{- '</tools>' }}\n {{- '\n\n도구를 호출하려면 아래의 JSON으로 응답하세요.\n도구 호출 형식: <tool_call>{\"name\": 도구 이름, \"arguments\": dictionary 형태의 도구 인자값}</tool_call>' }}\n {{- '<|im_end|>' }}\n {%- endif %}\n \n {%- for message in messages %}\n {%- if message.role == 'system' %}\n {{- '<|im_start|><|system|>' + message.content + '<|im_end|>'}}\n {%- elif message.role == 'user' %}\n {{- '<|im_start|><|user|>' + message.content + '<|im_end|>'}}\n {%- elif message.role == 'assistant' %}\n {{- '<|im_start|><|assistant|>'}}\n {%- set content = '' %}\n {%- if message.content is defined %}\n {%- set content = message.content %}\n {%- endif %}\n \n {%- if add_generation_prompt and not (message.reasoning_content is defined and message.reasoning_content is not none) %}\n {%- if '</think>' in message.content %}\n {%- set content = message.content.split('</think>'.strip())[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n \n {{- content}}\n {%- if message.tool_calls is defined %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>' }}\n {{- '{' }}\n {{- '\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\"' }}\n {%- if tool_call.arguments is defined %}\n {{- ', ' }}\n {{- '\"arguments\": ' }}\n {{- tool_call.arguments|tojson }}\n {%- endif %}\n {{- '}' }}\n {{- '</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>'}}\n \n {%- elif message.role == 'tool' %}\n {{- '<|im_start|><|extra_id_13|><tool_output>' + message.content + '</tool_output><|im_end|>'}}\n {%- endif %}\n {%- endfor %}\n \n {%- if add_generation_prompt %}\n {{- '<|im_start|><|assistant|>' }}\n {%- endif %}",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "<|cls|>",
+ "eod_token": "<|endoftext|>",
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "mask_token": "<|mask|>",
+ "max_length": 7680,
+ "model_max_length": 16384,
+ "pad_token": "<|pad|>",
+ "padding_side": "right",
+ "sep_token": "<|sep|>",
+ "tokenizer_class": "GPT2Tokenizer",
+ "truncation_side": "left",
+ "unk_token": "<|unk|>",
+ "vocab_size": 102400
+ }
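
The chat_template above renders ChatML-style turns (<|im_start|><|role|> ... <|im_end|>). When tools are supplied it also injects a Korean tool-calling system prompt (roughly: "You are a helpful assistant with tool-calling capability. If a tool in the given list is needed, respond with a tool call; if not, answer directly without one; if required arguments are missing, ask the user for them; you may call tools multiple times, and when tool output is fed back, use it to compose the answer"), lists the tool schemas inside <tools>...</tools>, and expects calls as <tool_call>{"name": ..., "arguments": ...}</tool_call>, with results returned inside <tool_output>...</tool_output>. A minimal rendering sketch — the get_weather schema is purely illustrative, and the tools= argument to apply_chat_template assumes a reasonably recent transformers release:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # local clone of this repo

# Hypothetical tool schema, just to exercise the template's tool branch.
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
messages = [{"role": "user", "content": "What is the weather in Seoul?"}]

prompt = tok.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(prompt)  # ends with '<|im_start|><|assistant|>', awaiting the model's turn
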
vocab.json ADDED
The diff for this file is too large to render. See raw diff