README.md CHANGED
@@ -211,9 +211,6 @@ python3 -m sglang.launch_server --model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
 3. For mathematical problems, it is advisable to include a directive in your prompt such as: "Please reason step by step, and put your final answer within \boxed{}."
 4. When evaluating model performance, it is recommended to conduct multiple tests and average the results.
 
-Additionally, we have observed that the DeepSeek-R1 series models tend to bypass thinking pattern (i.e., outputting "\<think\>\n\n\</think\>") when responding to certain queries, which can adversely affect the model's performance.
-**To ensure that the model engages in thorough reasoning, we recommend enforcing the model to initiate its response with "\<think\>\n" at the beginning of every output.**
-
 ## 7. License
 This code repository and the model weights are licensed under the [MIT License](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE).
 DeepSeek-R1 series support commercial use, allow for any modifications and derivative works, including, but not limited to, distillation for training other LLMs. Please note that:
@@ -225,7 +222,7 @@ DeepSeek-R1 series support commercial use, allow for any modifications and deriv
 ```
 @misc{deepseekai2025deepseekr1incentivizingreasoningcapability,
       title={DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning},
-      author={DeepSeek-AI},
+      author={DeepSeek-AI and Daya Guo and Dejian Yang and Haowei Zhang and Junxiao Song and Ruoyu Zhang and Runxin Xu and Qihao Zhu and Shirong Ma and Peiyi Wang and Xiao Bi and Xiaokang Zhang and Xingkai Yu and Yu Wu and Z. F. Wu and Zhibin Gou and Zhihong Shao and Zhuoshu Li and Ziyi Gao and Aixin Liu and Bing Xue and Bingxuan Wang and Bochao Wu and Bei Feng and Chengda Lu and Chenggang Zhao and Chengqi Deng and Chenyu Zhang and Chong Ruan and Damai Dai and Deli Chen and Dongjie Ji and Erhang Li and Fangyun Lin and Fucong Dai and Fuli Luo and Guangbo Hao and Guanting Chen and Guowei Li and H. Zhang and Han Bao and Hanwei Xu and Haocheng Wang and Honghui Ding and Huajian Xin and Huazuo Gao and Hui Qu and Hui Li and Jianzhong Guo and Jiashi Li and Jiawei Wang and Jingchang Chen and Jingyang Yuan and Junjie Qiu and Junlong Li and J. L. Cai and Jiaqi Ni and Jian Liang and Jin Chen and Kai Dong and Kai Hu and Kaige Gao and Kang Guan and Kexin Huang and Kuai Yu and Lean Wang and Lecong Zhang and Liang Zhao and Litong Wang and Liyue Zhang and Lei Xu and Leyi Xia and Mingchuan Zhang and Minghua Zhang and Minghui Tang and Meng Li and Miaojun Wang and Mingming Li and Ning Tian and Panpan Huang and Peng Zhang and Qiancheng Wang and Qinyu Chen and Qiushi Du and Ruiqi Ge and Ruisong Zhang and Ruizhe Pan and Runji Wang and R. J. Chen and R. L. Jin and Ruyi Chen and Shanghao Lu and Shangyan Zhou and Shanhuang Chen and Shengfeng Ye and Shiyu Wang and Shuiping Yu and Shunfeng Zhou and Shuting Pan and S. S. Li and Shuang Zhou and Shaoqing Wu and Shengfeng Ye and Tao Yun and Tian Pei and Tianyu Sun and T. Wang and Wangding Zeng and Wanjia Zhao and Wen Liu and Wenfeng Liang and Wenjun Gao and Wenqin Yu and Wentao Zhang and W. L. Xiao and Wei An and Xiaodong Liu and Xiaohan Wang and Xiaokang Chen and Xiaotao Nie and Xin Cheng and Xin Liu and Xin Xie and Xingchao Liu and Xinyu Yang and Xinyuan Li and Xuecheng Su and Xuheng Lin and X. Q. Li and Xiangyue Jin and Xiaojin Shen and Xiaosha Chen and Xiaowen Sun and Xiaoxiang Wang and Xinnan Song and Xinyi Zhou and Xianzu Wang and Xinxia Shan and Y. K. Li and Y. Q. Wang and Y. X. Wei and Yang Zhang and Yanhong Xu and Yao Li and Yao Zhao and Yaofeng Sun and Yaohui Wang and Yi Yu and Yichao Zhang and Yifan Shi and Yiliang Xiong and Ying He and Yishi Piao and Yisong Wang and Yixuan Tan and Yiyang Ma and Yiyuan Liu and Yongqiang Guo and Yuan Ou and Yuduan Wang and Yue Gong and Yuheng Zou and Yujia He and Yunfan Xiong and Yuxiang Luo and Yuxiang You and Yuxuan Liu and Yuyang Zhou and Y. X. Zhu and Yanhong Xu and Yanping Huang and Yaohui Li and Yi Zheng and Yuchen Zhu and Yunxian Ma and Ying Tang and Yukun Zha and Yuting Yan and Z. Z. Ren and Zehui Ren and Zhangli Sha and Zhe Fu and Zhean Xu and Zhenda Xie and Zhengyan Zhang and Zhewen Hao and Zhicheng Ma and Zhigang Yan and Zhiyu Wu and Zihui Gu and Zijia Zhu and Zijun Liu and Zilin Li and Ziwei Xie and Ziyang Song and Zizheng Pan and Zhen Huang and Zhipeng Xu and Zhongyu Zhang and Zhen Zhang},
       year={2025},
       eprint={2501.12948},
       archivePrefix={arXiv},
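
The retained usage notes above (the `\boxed{}` directive and averaging over multiple runs) translate directly into an evaluation loop. Below is a minimal sketch of that loop with `transformers`; the checkpoint name and sampling settings are illustrative assumptions, not part of this diff.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed example checkpoint; any DeepSeek-R1 series model would apply.
model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# Recommendation 3: append the step-by-step / \boxed{} directive for math.
messages = [{
    "role": "user",
    "content": "What is 17 * 24?\n"
               "Please reason step by step, and put your final answer within \\boxed{}.",
}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Recommendation 4: sample several completions and aggregate the results.
outputs = model.generate(
    inputs,
    do_sample=True,
    temperature=0.6,  # sampling settings are assumptions, not from this diff
    num_return_sequences=4,
    max_new_tokens=2048,
)
for seq in outputs:
    print(tokenizer.decode(seq[inputs.shape[-1]:], skip_special_tokens=True))
```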
config.json CHANGED
@@ -9,6 +9,7 @@
     "AutoModel": "modeling_deepseek.DeepseekV3Model",
     "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
   },
+  "aux_loss_alpha": 0.001,
   "bos_token_id": 0,
   "eos_token_id": 1,
   "ep_size": 1,
@@ -31,6 +32,7 @@
   "num_hidden_layers": 61,
   "num_key_value_heads": 128,
   "num_nextn_predict_layers": 1,
+  "pretraining_tp": 1,
   "q_lora_rank": 1536,
   "qk_nope_head_dim": 128,
   "qk_rope_head_dim": 64,
@@ -56,6 +58,7 @@
   "rope_theta": 10000,
   "routed_scaling_factor": 2.5,
   "scoring_func": "sigmoid",
+  "seq_aux": true,
   "tie_word_embeddings": false,
   "topk_group": 4,
   "topk_method": "noaux_tc",
configuration_deepseek.py CHANGED
@@ -82,6 +82,11 @@ class DeepseekV3Config(PretrainedConfig):
             Beginning of stream token id.
         eos_token_id (`int`, *optional*, defaults to 2):
             End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
             Whether to tie weight embeddings
         rope_theta (`float`, *optional*, defaults to 10000.0):
@@ -136,6 +141,8 @@ class DeepseekV3Config(PretrainedConfig):
         first_k_dense_replace = 3,
         norm_topk_prob = True,
         scoring_func = 'sigmoid',
+        aux_loss_alpha = 0.001,
+        seq_aux = True,
         hidden_act="silu",
         max_position_embeddings=4096,
         initializer_range=0.02,
@@ -144,6 +151,7 @@ class DeepseekV3Config(PretrainedConfig):
         pad_token_id=None,
         bos_token_id=0,
         eos_token_id=1,
+        pretraining_tp=1,
         tie_word_embeddings=False,
         rope_theta=10000.0,
         rope_scaling=None,
@@ -176,6 +184,8 @@ class DeepseekV3Config(PretrainedConfig):
         self.first_k_dense_replace = first_k_dense_replace
         self.norm_topk_prob = norm_topk_prob
         self.scoring_func = scoring_func
+        self.aux_loss_alpha = aux_loss_alpha
+        self.seq_aux = seq_aux
         # for backward compatibility
         if num_key_value_heads is None:
             num_key_value_heads = num_attention_heads
@@ -184,6 +194,7 @@ class DeepseekV3Config(PretrainedConfig):
         self.hidden_act = hidden_act
         self.initializer_range = initializer_range
         self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
         self.use_cache = use_cache
         self.rope_theta = rope_theta
         self.rope_scaling = rope_scaling
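
For readers unfamiliar with the restored knobs: `aux_loss_alpha` weights an auxiliary load-balancing loss on the MoE router, and `seq_aux` selects a per-sequence formulation of that loss. The sketch below shows the sequence-level form in the style of earlier DeepSeek MoE releases; the function name, shapes, and normalization are assumptions for illustration, not code from this diff.

```python
import torch

def seq_balance_loss(scores, topk_idx, n_experts, top_k, alpha):
    """Hypothetical sequence-level MoE balance loss (DeepSeek-V2-style sketch).

    scores:   [bsz, seq_len, n_experts] routing probabilities
    topk_idx: [bsz, seq_len * top_k] selected expert ids per token
    """
    bsz, seq_len, _ = scores.shape
    # Count how often each expert is selected within each sequence...
    ce = torch.zeros(bsz, n_experts, device=scores.device)
    ce.scatter_add_(1, topk_idx, torch.ones_like(topk_idx, dtype=ce.dtype))
    # ...normalize so a perfectly uniform router yields ce == 1 everywhere...
    ce = ce / (seq_len * top_k / n_experts)
    # ...and penalize correlation between realized load and mean routing prob.
    return (ce * scores.mean(dim=1)).sum(dim=1).mean() * alpha
```

With `topk_method: "noaux_tc"` (auxiliary-loss-free balancing) the term is typically inactive at inference, so the restored fields look like compatibility defaults rather than active training settings.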
modeling_deepseek.py CHANGED
@@ -398,6 +398,7 @@ class MoEGate(nn.Module):
         self.n_routed_experts = config.n_routed_experts
         self.routed_scaling_factor = config.routed_scaling_factor
         self.scoring_func = config.scoring_func
+        self.seq_aux = config.seq_aux
         self.topk_method = config.topk_method
         self.n_group = config.n_group
         self.topk_group = config.topk_group
@@ -454,7 +455,7 @@ class MoEGate(nn.Module):
                 )
                 .reshape(bsz * seq_len, -1)
             )  # [n, e]
-            tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), float("-inf"))  # [n, e]
+            tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), 0.0)  # [n, e]
             _, topk_idx = torch.topk(
                 tmp_scores, k=self.top_k, dim=-1, sorted=False
             )
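
A plausible reading of the `-inf` → `0.0` change (my inference; the diff carries no rationale): with `scoring_func: "sigmoid"` every real routing score lies in (0, 1), so masked experts filled with `0.0` still lose every `topk` comparison, while `-inf` sentinels can surface as `NaN` in later arithmetic (e.g., `-inf * 0`). A toy check of the selection behaviour:

```python
import torch

scores = torch.sigmoid(torch.randn(4, 8))  # [n, e] toy sigmoid routing scores
mask = torch.zeros(4, 8)
mask[:, :4] = 1                            # only experts 0-3 are routable
tmp = scores.masked_fill(~mask.bool(), 0.0)
_, topk_idx = torch.topk(tmp, k=2, dim=-1, sorted=False)
assert (topk_idx < 4).all()                # masked experts are never selected
```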
tokenizer_config.json CHANGED
@@ -31,5 +31,5 @@
   "sp_model_kwargs": {},
   "unk_token": null,
   "tokenizer_class": "LlamaTokenizerFast",
-  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true) %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if ns.is_first_sp %}{% set ns.system_prompt = ns.system_prompt + message['content'] %}{% set ns.is_first_sp = false %}{%- else %}{% set ns.system_prompt = ns.system_prompt + '\\n\\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{{ bos_token }}{{ ns.system_prompt }}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' in message %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls'] %}{%- if not ns.is_first %}{%- if message['content'] is none %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- else %}{{'<|Assistant|>' + message['content'] + '<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- endfor %}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' not in message %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}"
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true) %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if ns.is_first_sp %}{% set ns.system_prompt = ns.system_prompt + message['content'] %}{% set ns.is_first_sp = false %}{%- else %}{% set ns.system_prompt = ns.system_prompt + '\\n\\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{{ bos_token }}{{ ns.system_prompt }}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' in message %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls'] %}{%- if not ns.is_first %}{%- if message['content'] is none %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- else %}{{'<|Assistant|>' + message['content'] + '<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- endfor %}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' not in message %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}"
 }
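
The only difference between the two template strings is the final branch: with `add_generation_prompt=True` the rendered prompt now ends in `<|Assistant|>` rather than `<|Assistant|><think>\n`. This mirrors the README change above, which dropped the recommendation to force every response to begin with `<think>\n`. If you still want the old forced-thinking behaviour, a sketch (local tokenizer path assumed):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # path to a checkout of this repo
messages = [{"role": "user", "content": "Prove that sqrt(2) is irrational."}]
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# New template ends the prompt at "<|Assistant|>"; re-add the prefix manually
# to reproduce the previous forced-reasoning behaviour.
prompt += "<think>\n"
```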