jingyaogong committed
Commit 44c632c · verified · 1 Parent(s): 84483a6

Upload 7 files

config.json CHANGED
@@ -1,32 +1,35 @@
-{
-  "architectures": [
-    "MiniMindLM"
-  ],
-  "auto_map": {
-    "AutoConfig": "LMConfig.LMConfig",
-    "AutoModelForCausalLM": "model.MiniMindLM"
-  },
-  "aux_loss_alpha": 0.1,
-  "dim": 640,
-  "dropout": 0.0,
-  "flash_attn": true,
-  "hidden_dim": 1728,
-  "max_seq_len": 8192,
-  "model_type": "minimind",
-  "multiple_of": 64,
-  "n_heads": 8,
-  "n_kv_heads": 2,
-  "n_layers": 8,
-  "n_routed_experts": 4,
-  "n_shared_experts": true,
-  "norm_eps": 1e-05,
-  "norm_topk_prob": true,
-  "num_experts_per_tok": 2,
-  "rope_theta": 1000000.0,
-  "scoring_func": "softmax",
-  "seq_aux": true,
-  "torch_dtype": "float32",
-  "transformers_version": "4.44.0",
-  "use_moe": true,
-  "vocab_size": 6400
-}
+{
+  "architectures": [
+    "MiniMindForCausalLM"
+  ],
+  "auto_map": {
+    "AutoConfig": "model_minimind.MiniMindConfig",
+    "AutoModelForCausalLM": "model_minimind.MiniMindForCausalLM"
+  },
+  "aux_loss_alpha": 0.1,
+  "bos_token_id": 1,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "flash_attn": true,
+  "hidden_act": "silu",
+  "hidden_size": 640,
+  "intermediate_size": 1728,
+  "max_position_embeddings": 32768,
+  "max_seq_len": 8192,
+  "model_type": "minimind",
+  "n_routed_experts": 4,
+  "n_shared_experts": 1,
+  "norm_topk_prob": true,
+  "num_attention_heads": 8,
+  "num_experts_per_tok": 2,
+  "num_hidden_layers": 8,
+  "num_key_value_heads": 2,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 1000000.0,
+  "scoring_func": "softmax",
+  "seq_aux": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.51.3",
+  "use_moe": true,
+  "vocab_size": 6400
+}
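
For reference, a minimal loading sketch that exercises the updated auto_map above. The repo id is a placeholder assumption; any local directory containing these files works the same way.

# Hypothetical usage sketch (not part of the uploaded files); "jingyaogong/minimind" is a stand-in repo id.
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo = "jingyaogong/minimind"  # placeholder: substitute the actual repo id or a local path
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)           # resolves model_minimind.MiniMindConfig
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)  # resolves model_minimind.MiniMindForCausalLM
tokenizer = AutoTokenizer.from_pretrained(repo)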
generation_config.json CHANGED
@@ -1,4 +1,6 @@
-{
-  "_from_model_config": true,
-  "transformers_version": "4.44.0"
-}
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.51.3"
+}
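
With bos_token_id/eos_token_id now present in generation_config.json, generate() stops once the model emits token id 2 (<|im_end|>). A small sketch, assuming `model` and `tokenizer` were loaded as in the previous snippet:

inputs = tokenizer("<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)  # halts at eos_token_id=2 or after 64 new tokens
print(tokenizer.decode(out[0], skip_special_tokens=True))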
model_minimind.py ADDED
@@ -0,0 +1,446 @@
+# 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+# MiniMind Config
+# 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+
+from transformers import PretrainedConfig
+
+
+class MiniMindConfig(PretrainedConfig):
+    model_type = "minimind"
+
+    def __init__(
+            self,
+            dropout: float = 0.0,
+            bos_token_id: int = 1,
+            eos_token_id: int = 2,
+            hidden_act: str = 'silu',
+            hidden_size: int = 512,
+            intermediate_size: int = None,
+            max_position_embeddings: int = 32768,
+            num_attention_heads: int = 8,
+            num_hidden_layers: int = 8,
+            num_key_value_heads: int = 2,
+            vocab_size: int = 6400,
+            rms_norm_eps: float = 1e-05,
+            rope_theta: float = 1000000.0,
+            flash_attn: bool = True,
+            ####################################################
+            # Here are the specific configurations of MOE
+            # When use_moe is false, the following is invalid
+            ####################################################
+            use_moe: bool = False,
+            num_experts_per_tok: int = 2,
+            n_routed_experts: int = 4,
+            n_shared_experts: int = 1,
+            scoring_func: str = 'softmax',
+            aux_loss_alpha: float = 0.1,
+            seq_aux: bool = True,
+            norm_topk_prob: bool = True,
+            **kwargs
+    ):
+        super().__init__(**kwargs)
+        self.dropout = dropout
+        self.bos_token_id = bos_token_id
+        self.eos_token_id = eos_token_id
+        self.hidden_act = hidden_act
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.max_position_embeddings = max_position_embeddings
+        self.num_attention_heads = num_attention_heads
+        self.num_hidden_layers = num_hidden_layers
+        self.num_key_value_heads = num_key_value_heads
+        self.vocab_size = vocab_size
+        self.rms_norm_eps = rms_norm_eps
+        self.rope_theta = rope_theta
+        self.flash_attn = flash_attn
+        ####################################################
+        # Here are the specific configurations of MOE
+        # When use_moe is false, the following is invalid
+        ####################################################
+        self.use_moe = use_moe
+        self.num_experts_per_tok = num_experts_per_tok  # number of experts selected per token
+        self.n_routed_experts = n_routed_experts  # total number of routed experts
+        self.n_shared_experts = n_shared_experts  # number of shared experts
+        self.scoring_func = scoring_func  # scoring function, 'softmax' by default
+        self.aux_loss_alpha = aux_loss_alpha  # weight of the auxiliary (load-balancing) loss
+        self.seq_aux = seq_aux  # whether to compute the auxiliary loss at sequence level
+        self.norm_topk_prob = norm_topk_prob  # whether to normalize the top-k probabilities
+
+
+# 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+# MiniMind Model
+# 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+
+import math
+import torch
+from torch import nn
+from transformers.activations import ACT2FN
+from typing import Optional, Tuple, List, Union
+import torch.nn.functional as F
+from transformers import PreTrainedModel, GenerationMixin, PretrainedConfig
+from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+class RMSNorm(torch.nn.Module):
+    def __init__(self, dim: int, eps: float = 1e-5):
+        super().__init__()
+        self.eps = eps
+        self.weight = nn.Parameter(torch.ones(dim))
+
+    def _norm(self, x):
+        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+    def forward(self, x):
+        return self.weight * self._norm(x.float()).type_as(x)
+
+
+def precompute_freqs_cis(dim: int, end: int = int(32 * 1024), theta: float = 1e6):
+    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
+    t = torch.arange(end, device=freqs.device)
+    freqs = torch.outer(t, freqs).float()
+    freqs_cos = torch.cat([torch.cos(freqs), torch.cos(freqs)], dim=-1)
+    freqs_sin = torch.cat([torch.sin(freqs), torch.sin(freqs)], dim=-1)
+    return freqs_cos, freqs_sin
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+    def rotate_half(x):
+        return torch.cat((-x[..., x.shape[-1] // 2:], x[..., : x.shape[-1] // 2]), dim=-1)
+
+    q_embed = (q * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(q) * sin.unsqueeze(unsqueeze_dim))
+    k_embed = (k * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(k) * sin.unsqueeze(unsqueeze_dim))
+    return q_embed, k_embed
+
+
+def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
+    """torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
+    bs, slen, num_key_value_heads, head_dim = x.shape
+    if n_rep == 1:
+        return x
+    return (
+        x[:, :, :, None, :]
+        .expand(bs, slen, num_key_value_heads, n_rep, head_dim)
+        .reshape(bs, slen, num_key_value_heads * n_rep, head_dim)
+    )
+
+
+class Attention(nn.Module):
+    def __init__(self, args: MiniMindConfig):
+        super().__init__()
+        self.num_key_value_heads = args.num_attention_heads if args.num_key_value_heads is None else args.num_key_value_heads
+        assert args.num_attention_heads % self.num_key_value_heads == 0
+        self.n_local_heads = args.num_attention_heads
+        self.n_local_kv_heads = self.num_key_value_heads
+        self.n_rep = self.n_local_heads // self.n_local_kv_heads
+        self.head_dim = args.hidden_size // args.num_attention_heads
+        self.q_proj = nn.Linear(args.hidden_size, args.num_attention_heads * self.head_dim, bias=False)
+        self.k_proj = nn.Linear(args.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+        self.v_proj = nn.Linear(args.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+        self.o_proj = nn.Linear(args.num_attention_heads * self.head_dim, args.hidden_size, bias=False)
+        self.attn_dropout = nn.Dropout(args.dropout)
+        self.resid_dropout = nn.Dropout(args.dropout)
+        self.dropout = args.dropout
+        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and args.flash_attn
+        # print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
+
+    def forward(self,
+                x: torch.Tensor,
+                position_embeddings: Tuple[torch.Tensor, torch.Tensor],  # receives precomputed (cos, sin)
+                past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+                use_cache=False,
+                attention_mask: Optional[torch.Tensor] = None):
+        bsz, seq_len, _ = x.shape
+        xq, xk, xv = self.q_proj(x), self.k_proj(x), self.v_proj(x)
+        xq = xq.view(bsz, seq_len, self.n_local_heads, self.head_dim)
+        xk = xk.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
+        xv = xv.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
+
+        cos, sin = position_embeddings
+        xq, xk = apply_rotary_pos_emb(xq, xk, cos[:seq_len], sin[:seq_len])
+
+        # KV cache
+        if past_key_value is not None:
+            xk = torch.cat([past_key_value[0], xk], dim=1)
+            xv = torch.cat([past_key_value[1], xv], dim=1)
+        past_kv = (xk, xv) if use_cache else None
+
+        xq, xk, xv = (
+            xq.transpose(1, 2),
+            repeat_kv(xk, self.n_rep).transpose(1, 2),
+            repeat_kv(xv, self.n_rep).transpose(1, 2)
+        )
+
+        if False and self.flash and seq_len != 1:
+            dropout_p = self.dropout if self.training else 0.0
+            attn_mask = None
+            if attention_mask is not None:
+                attn_mask = attention_mask.view(bsz, 1, 1, -1).expand(bsz, self.n_local_heads, seq_len, -1)
+                attn_mask = attn_mask.bool() if attention_mask is not None else None
+
+            output = F.scaled_dot_product_attention(xq, xk, xv, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=True)
+        else:
+            scores = (xq @ xk.transpose(-2, -1)) / math.sqrt(self.head_dim)
+            scores = scores + torch.triu(
+                torch.full((seq_len, seq_len), float("-inf"), device=scores.device),
+                diagonal=1
+            ).unsqueeze(0).unsqueeze(0)  # scores + causal mask
+
+            if attention_mask is not None:
+                extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+                extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
+                scores = scores + extended_attention_mask
+
+            scores = F.softmax(scores.float(), dim=-1).type_as(xq)
+            scores = self.attn_dropout(scores)
+            output = scores @ xv
+
+        output = output.transpose(1, 2).reshape(bsz, seq_len, -1)
+        output = self.resid_dropout(self.o_proj(output))
+        return output, past_kv
+
+
+class FeedForward(nn.Module):
+    def __init__(self, config: MiniMindConfig):
+        super().__init__()
+        if config.intermediate_size is None:
+            intermediate_size = int(config.hidden_size * 8 / 3)
+            config.intermediate_size = 64 * ((intermediate_size + 64 - 1) // 64)
+        self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
+        self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
+        self.dropout = nn.Dropout(config.dropout)
+        self.act_fn = ACT2FN[config.hidden_act]
+
+    def forward(self, x):
+        return self.dropout(self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)))
+
+
+class MoEGate(nn.Module):
+    def __init__(self, config: MiniMindConfig):
+        super().__init__()
+        self.config = config
+        self.top_k = config.num_experts_per_tok
+        self.n_routed_experts = config.n_routed_experts
+
+        self.scoring_func = config.scoring_func
+        self.alpha = config.aux_loss_alpha
+        self.seq_aux = config.seq_aux
+
+        self.norm_topk_prob = config.norm_topk_prob
+        self.gating_dim = config.hidden_size
+        self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
+        self.reset_parameters()
+
+    def reset_parameters(self) -> None:
+        import torch.nn.init as init
+        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
+
+    def forward(self, hidden_states):
+        bsz, seq_len, h = hidden_states.shape
+        hidden_states = hidden_states.view(-1, h)
+        logits = F.linear(hidden_states, self.weight, None)
+        if self.scoring_func == 'softmax':
+            scores = logits.softmax(dim=-1)
+        else:
+            raise NotImplementedError(f'unsupported scoring function for MoE gating: {self.scoring_func}')
+
+        topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)
+
+        if self.top_k > 1 and self.norm_topk_prob:
+            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
+            topk_weight = topk_weight / denominator
+
+        if self.training and self.alpha > 0.0:
+            scores_for_aux = scores
+            aux_topk = self.top_k
+            topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
+            if self.seq_aux:
+                scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
+                ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)
+                ce.scatter_add_(1, topk_idx_for_aux_loss,
+                                torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device)).div_(
+                    seq_len * aux_topk / self.n_routed_experts)
+                aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
+            else:
+                mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
+                ce = mask_ce.float().mean(0)
+                Pi = scores_for_aux.mean(0)
+                fi = ce * self.n_routed_experts
+                aux_loss = (Pi * fi).sum() * self.alpha
+        else:
+            aux_loss = 0
+        return topk_idx, topk_weight, aux_loss
+
+
+class MOEFeedForward(nn.Module):
+    def __init__(self, config: MiniMindConfig):
+        super().__init__()
+        self.config = config
+        self.experts = nn.ModuleList([
+            FeedForward(config)
+            for _ in range(config.n_routed_experts)
+        ])
+        self.gate = MoEGate(config)
+        if config.n_shared_experts > 0:
+            self.shared_experts = nn.ModuleList([
+                FeedForward(config)
+                for _ in range(config.n_shared_experts)
+            ])
+
+    def forward(self, x):
+        identity = x
+        orig_shape = x.shape
+        bsz, seq_len, _ = x.shape
+        # use the gating mechanism to select experts for each token
+        topk_idx, topk_weight, aux_loss = self.gate(x)
+        x = x.view(-1, x.shape[-1])
+        flat_topk_idx = topk_idx.view(-1)
+        if self.training:
+            x = x.repeat_interleave(self.config.num_experts_per_tok, dim=0)
+            y = torch.empty_like(x, dtype=torch.float16)
+            for i, expert in enumerate(self.experts):
+                y[flat_topk_idx == i] = expert(x[flat_topk_idx == i]).to(y.dtype)  # keep dtypes consistent
+            y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
+            y = y.view(*orig_shape)
+        else:
+            y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)
+        if self.config.n_shared_experts > 0:
+            for expert in self.shared_experts:
+                y = y + expert(identity)
+        self.aux_loss = aux_loss
+        return y
+
+    @torch.no_grad()
+    def moe_infer(self, x, flat_expert_indices, flat_expert_weights):
+        expert_cache = torch.zeros_like(x)
+        idxs = flat_expert_indices.argsort()
+        tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0)
+        token_idxs = idxs // self.config.num_experts_per_tok
+        # Example: if tokens_per_expert = [6, 15, 20, 26], then tokens_per_expert.shape[0] is the number of experts (4 here),
+        # and if token_idxs = [3, 7, 19, 21, 24, 25, 4, 5, 6, 10, 11, 12, ...] then
+        # token_idxs[:6] -> [3, 7, 19, 21, 24, 25] are the token positions handled by expert 0
+        # (a token may be routed to several experts, depending on num_experts_per_tok),
+        # and the next nine positions token_idxs[6:15] -> [4, 5, 6, 10, 11, 12, ...] belong to expert 1, and so on.
+        for i, end_idx in enumerate(tokens_per_expert):
+            start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
+            if start_idx == end_idx:
+                continue
+            expert = self.experts[i]
+            exp_token_idx = token_idxs[start_idx:end_idx]
+            expert_tokens = x[exp_token_idx]
+            expert_out = expert(expert_tokens).to(expert_cache.dtype)
+            expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]])
+            expert_cache.scatter_add_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out)
+
+        return expert_cache
+
+
+class MiniMindBlock(nn.Module):
+    def __init__(self, layer_id: int, config: MiniMindConfig):
+        super().__init__()
+        self.num_attention_heads = config.num_attention_heads
+        self.hidden_size = config.hidden_size
+        self.head_dim = config.hidden_size // config.num_attention_heads
+        self.self_attn = Attention(config)
+
+        self.layer_id = layer_id
+        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.mlp = FeedForward(config) if not config.use_moe else MOEFeedForward(config)
+
+    def forward(self, hidden_states, position_embeddings, past_key_value=None, use_cache=False, attention_mask=None):
+        residual = hidden_states
+        hidden_states, present_key_value = self.self_attn(
+            self.input_layernorm(hidden_states), position_embeddings,
+            past_key_value, use_cache, attention_mask
+        )
+        hidden_states += residual
+        hidden_states = hidden_states + self.mlp(self.post_attention_layernorm(hidden_states))
+        return hidden_states, present_key_value
+
+
+class MiniMindModel(nn.Module):
+    def __init__(self, config: MiniMindConfig):
+        super().__init__()
+        self.config = config
+        self.vocab_size, self.num_hidden_layers = config.vocab_size, config.num_hidden_layers
+        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+        self.dropout = nn.Dropout(config.dropout)
+        self.layers = nn.ModuleList([MiniMindBlock(l, config) for l in range(self.num_hidden_layers)])
+        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+        freqs_cos, freqs_sin = precompute_freqs_cis(dim=config.hidden_size // config.num_attention_heads,
+                                                    end=config.max_position_embeddings, theta=config.rope_theta)
+        self.register_buffer("freqs_cos", freqs_cos, persistent=False)
+        self.register_buffer("freqs_sin", freqs_sin, persistent=False)
+
+    def forward(self,
+                input_ids: Optional[torch.Tensor] = None,
+                attention_mask: Optional[torch.Tensor] = None,
+                past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
+                use_cache: bool = False,
+                **kwargs):
+        batch_size, seq_length = input_ids.shape
+        past_key_values = past_key_values or [None] * len(self.layers)
+        start_pos = past_key_values[0][0].shape[1] if past_key_values[0] is not None else 0
+
+        hidden_states = self.dropout(self.embed_tokens(input_ids))
+
+        position_embeddings = (
+            self.freqs_cos[start_pos:start_pos + seq_length],
+            self.freqs_sin[start_pos:start_pos + seq_length]
+        )
+
+        presents = []
+        for layer_idx, (layer, past_key_value) in enumerate(zip(self.layers, past_key_values)):
+            hidden_states, present = layer(
+                hidden_states,
+                position_embeddings,
+                past_key_value=past_key_value,
+                use_cache=use_cache,
+                attention_mask=attention_mask
+            )
+            presents.append(present)
+
+        hidden_states = self.norm(hidden_states)
+
+        aux_loss = sum(
+            layer.mlp.aux_loss
+            for layer in self.layers
+            if isinstance(layer.mlp, MOEFeedForward)
+        )
+
+        return hidden_states, presents, aux_loss
+
+
+class MiniMindForCausalLM(PreTrainedModel, GenerationMixin):
+    config_class = MiniMindConfig
+
+    def __init__(self, config: MiniMindConfig = None):
+        self.config = config or MiniMindConfig()
+        super().__init__(self.config)
+        self.model = MiniMindModel(self.config)
+        self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=False)
+        self.model.embed_tokens.weight = self.lm_head.weight
+        self.OUT = CausalLMOutputWithPast()
+
+    def forward(self,
+                input_ids: Optional[torch.Tensor] = None,
+                attention_mask: Optional[torch.Tensor] = None,
+                past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
+                use_cache: bool = False,
+                logits_to_keep: Union[int, torch.Tensor] = 0,
+                **args):
+        h, past_kvs, aux_loss = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            **args
+        )
+        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+        logits = self.lm_head(h[:, slice_indices, :])
+        self.OUT.__setitem__('last_hidden_state', h)
+        self.OUT.__setitem__('logits', logits)
+        self.OUT.__setitem__('aux_loss', aux_loss)
+        self.OUT.__setitem__('past_key_values', past_kvs)
+        return self.OUT
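
To illustrate the classes above, a minimal sketch that instantiates the architecture directly with the hyperparameters from this repo's config.json (random weights only; the pretrained checkpoint itself is loaded via AutoModelForCausalLM.from_pretrained(..., trust_remote_code=True) as in the earlier snippet):

# Standalone sketch: random-weight MiniMind with this checkpoint's hyperparameters.
import torch
from model_minimind import MiniMindConfig, MiniMindForCausalLM

cfg = MiniMindConfig(hidden_size=640, intermediate_size=1728, num_hidden_layers=8,
                     num_attention_heads=8, num_key_value_heads=2,
                     use_moe=True, n_routed_experts=4, n_shared_experts=1)
model = MiniMindForCausalLM(cfg).eval()

ids = torch.randint(0, cfg.vocab_size, (1, 16))
with torch.no_grad():
    out = model(input_ids=ids, use_cache=True)
print(out.logits.shape)          # torch.Size([1, 16, 6400])
print(len(out.past_key_values))  # 8 layers, one (k, v) pair each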
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eda2077ba415f651ff07e3e1408d268e1dcdc15a86364454747753a5b12d9f0e
+size 290118354
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<|im_start|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,43 +1,44 @@
-{
-  "add_bos_token": false,
-  "add_eos_token": false,
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "additional_special_tokens": [],
-  "bos_token": "<s>",
-  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{{ '<s>system\\n' + system_message + '</s>\\n' }}{% else %}{{ '<s>system\\n你是 MiniMind,是一个有用的人工智能助手。</s>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<s>user\\n' + content + '</s>\\n<s>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '\\n' }}{% endif %}{% endfor %}",
-  "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
-  "legacy": true,
-  "model_max_length": 32768,
-  "pad_token": "<unk>",
-  "sp_model_kwargs": {},
-  "spaces_between_special_tokens": false,
-  "tokenizer_class": "PreTrainedTokenizerFast",
-  "unk_token": "<unk>"
-}
+{
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": "<|im_start|>",
+  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% else %}{{ '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "extra_special_tokens": {},
+  "legacy": true,
+  "model_max_length": 32768,
+  "pad_token": "<|endoftext|>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "PreTrainedTokenizer",
+  "unk_token": "<|endoftext|>"
+}
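
Finally, a short sketch of the new ChatML-style chat_template in action, assuming `tokenizer` was loaded from this repo as in the earlier snippets (the template itself appends the assistant prefix after each user turn, so no add_generation_prompt flag is needed):

messages = [{"role": "user", "content": "Hello"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
# prompt == "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n"
print(prompt)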