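"""Convert a standard Qwen3-MoE Hugging Face checkpoint into the fused "shared MoE"
layout defined by configuration_qwen3_shared_moe.py / modeling_qwen3_shared_moe.py.

Usage (the script name is assumed here; use whatever this file is called in your checkout):

    python convert_qwen3_moe_to_shared.py <input_model_dir> <output_model_dir>
"""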
import glob
import re
import shutil
import sys

import accelerate
import torch
from configuration_qwen3_shared_moe import Qwen3SharedMoeConfig
from modeling_qwen3_shared_moe import Qwen3SharedMoeForCausalLM
from safetensors import safe_open
from transformers.models.qwen3_moe.configuration_qwen3_moe import Qwen3MoeConfig

# Positional arguments: path to the source Qwen3-MoE checkpoint and the output directory.
input_model = sys.argv[1]
output_model_path = sys.argv[2]

# Read the source config and mirror it field-for-field into the shared-MoE config;
# only shared_expert_intermediate_size is set explicitly (None) rather than copied.
cfg_standard_moe = Qwen3MoeConfig.from_pretrained(input_model)
cfg_shared_moe = Qwen3SharedMoeConfig(
    vocab_size=cfg_standard_moe.vocab_size,
    hidden_size=cfg_standard_moe.hidden_size,
    intermediate_size=cfg_standard_moe.intermediate_size,
    num_hidden_layers=cfg_standard_moe.num_hidden_layers,
    num_attention_heads=cfg_standard_moe.num_attention_heads,
    num_key_value_heads=cfg_standard_moe.num_key_value_heads,
    hidden_act=cfg_standard_moe.hidden_act,
    max_position_embeddings=cfg_standard_moe.max_position_embeddings,
    initializer_range=cfg_standard_moe.initializer_range,
    rms_norm_eps=cfg_standard_moe.rms_norm_eps,
    use_cache=cfg_standard_moe.use_cache,
    tie_word_embeddings=cfg_standard_moe.tie_word_embeddings,
    rope_theta=cfg_standard_moe.rope_theta,
    rope_scaling=cfg_standard_moe.rope_scaling,
    attention_bias=cfg_standard_moe.attention_bias,
    use_sliding_window=cfg_standard_moe.use_sliding_window,
    sliding_window=cfg_standard_moe.sliding_window,
    max_window_layers=cfg_standard_moe.max_window_layers,
    attention_dropout=cfg_standard_moe.attention_dropout,
    decoder_sparse_step=cfg_standard_moe.decoder_sparse_step,
    moe_intermediate_size=cfg_standard_moe.moe_intermediate_size,
    num_experts_per_tok=cfg_standard_moe.num_experts_per_tok,
    num_experts=cfg_standard_moe.num_experts,
    norm_topk_prob=cfg_standard_moe.norm_topk_prob,
    output_router_logits=cfg_standard_moe.output_router_logits,
    router_aux_loss_coef=cfg_standard_moe.router_aux_loss_coef,
    shared_expert_intermediate_size=None,
    mlp_only_layers=cfg_standard_moe.mlp_only_layers,
    head_dim=cfg_standard_moe.head_dim,
)

num_experts = cfg_standard_moe.num_experts

# Instantiate the target model on the meta device (no memory is allocated yet);
# the real weights are attached later via load_state_dict(..., assign=True).
with accelerate.init_empty_weights():
    model_shared_moe = Qwen3SharedMoeForCausalLM(cfg_shared_moe)

model_shared_moe = model_shared_moe.to(torch.bfloat16)
new_state_dict = {}

# Collect every safetensors shard of the source checkpoint.
pattern = f"{input_model}/model-*-of-*.safetensors"
files = sorted(glob.glob(pattern))
if not files:
    raise FileNotFoundError(f"no safetensors shards found matching {pattern}")

tensors = {}

# Load all shard tensors into a single flat dict on CPU.
for file_path in files:
    print(f"processing {file_path}")
    with safe_open(file_path, framework="pt", device="cpu") as f:
        for key in f.keys():
            tensors[key] = f.get_tensor(key)

# Remap expert weights into the fused layout used by the shared-MoE modeling code;
# everything else is copied through under its original name.
for key in tensors:
    if "experts" not in key:
        # Attention, norm, embedding, and router-gate tensors keep their names.
        new_state_dict[key] = tensors[key]
    elif "experts.0" in key:
        # Handle each MoE layer when one of expert 0's tensors is seen; the first
        # integer in the key is the layer index.
        layer_num = int(re.search(r"\d+", key).group())
        # Stack every expert's down_proj into a single output_experts tensor.
        new_state_dict[
            f"model.layers.{layer_num}.mlp.moe_mlp.output_experts.weight"
        ] = torch.stack(
            [
                tensors[f"model.layers.{layer_num}.mlp.experts.{i}.down_proj.weight"]
                for i in range(num_experts)
            ]
        )
        # For each expert, concatenate up_proj and gate_proj along the output rows,
        # then stack the experts into one fused input-projection tensor.
        new_state_dict[f"model.layers.{layer_num}.mlp.moe_mlp.experts.weight"] = torch.stack(
            [
                torch.cat(
                    [
                        tensors[f"model.layers.{layer_num}.mlp.experts.{i}.up_proj.weight"],
                        tensors[f"model.layers.{layer_num}.mlp.experts.{i}.gate_proj.weight"],
                    ],
                    dim=0,
                )
                for i in range(num_experts)
            ]
        )
# assign=True swaps the meta-device parameters for the loaded tensors rather than
# copying into them; strict=True checks the remapped keys match the model exactly.
model_shared_moe.load_state_dict(new_state_dict, strict=True, assign=True)
model_shared_moe.save_pretrained(output_model_path)
cfg_shared_moe.save_pretrained(output_model_path)


# Ship the custom modeling/configuration code next to the converted weights so the
# output directory is self-contained.
shutil.copy(
    "modeling_qwen3_shared_moe.py",
    output_model_path + "/" + "modeling_qwen3_shared_moe.py",
)
shutil.copy(
    "configuration_qwen3_shared_moe.py",
    output_model_path + "/" + "configuration_qwen3_shared_moe.py",
)

# Copy the tokenizer files over unchanged from the source checkpoint.
for fname in ["merges.txt", "tokenizer_config.json", "tokenizer.json", "vocab.json"]:
    shutil.copy(input_model + "/" + fname, output_model_path + "/" + fname)
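
# A minimal sanity check of the converted checkpoint (illustrative sketch, assuming the
# custom class can load the saved directory directly; not part of the conversion itself):
#
#     model = Qwen3SharedMoeForCausalLM.from_pretrained(
#         output_model_path, torch_dtype=torch.bfloat16
#     )
#     print(model.config)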