File size: 1,473 Bytes
d3ba21b
 
 
 
 
 
 
a21f3cd
 
d3ba21b
 
ed8b0c6
92c981d
 
 
d3ba21b
 
a21f3cd
d3ba21b
ed8b0c6
a21f3cd
 
e584a13
d3ba21b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42


import os
from transformers import AutoTokenizer


# Hugging Face model ID whose chat template will be extracted.
# Uncomment exactly one MODEL_PATH; the notes record tool-calling support
# observed for each model's template.
# MODEL_PATH = "meta-llama/Llama-3.1-405B-Instruct"
# MODEL_PATH = "NousResearch/Hermes-3-Llama-3.1-405B"  # does not support tool_calls in messages
# MODEL_PATH = "../../test/Llama-4-Maverick-17B-128E-Instruct"
# MODEL_PATH = "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
# MODEL_PATH = "Qwen/Qwen3-235B-A22B-Instruct-2507"
# MODEL_PATH = "Qwen/Qwen3-235B-A22B-Thinking-2507"
# MODEL_PATH = "Qwen/Qwen3-235B-A22B"
# MODEL_PATH = "Qwen/Qwen2.5-72B-Instruct"
MODEL_PATH = "Qwen/QwQ-32B"
# MODEL_PATH = "mistralai/Mistral-7B-Instruct-v0.1"  # no tool_calls in messages, no role=tool, no tools
# MODEL_PATH = "mistralai/Ministral-8B-Instruct-2410" # supports tools and tool_calls (id required); non-mainstream format
# MODEL_PATH = "deepseek-ai/DeepSeek-R1"
# MODEL_PATH = "deepseek-ai/DeepSeek-R1-0528"
# MODEL_PATH = 'deepseek-ai/DeepSeek-V3.1'
# MODEL_PATH = "google/gemma-3-27b-it" 


# Download the tokenizer for MODEL_PATH and export its Jinja chat template(s)
# into a directory named after the model's last path segment.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
chat_template = tokenizer.chat_template

output_dir = MODEL_PATH.split("/")[-1]
os.makedirs(output_dir, exist_ok=True)

if chat_template is None:
    # Some tokenizers (e.g. base models) ship without a chat template;
    # fail loudly instead of crashing later on f_out.write(None).
    raise ValueError(f"Tokenizer for {MODEL_PATH} has no chat_template to export")

if isinstance(chat_template, dict):
    # Multi-template tokenizers map template names to Jinja sources;
    # write one file per named template.
    for name, template in chat_template.items():
        # Templates often contain non-ASCII text, so force UTF-8 rather than
        # relying on the platform-default encoding.
        with open(f"{output_dir}/chat_template.{name}.jinja", "w", encoding="utf-8") as f_out:
            f_out.write(template)
else:
    # Single-template tokenizers expose the template as one string.
    with open(f"{output_dir}/chat_template.jinja", "w", encoding="utf-8") as f_out:
        f_out.write(chat_template)