sheep33333 committed a36b3a8 (verified) · parent: 8369150

Upload modelforseminat_v4.py with huggingface_hub

modelforseminat_v4.py ADDED
@@ -0,0 +1,1861 @@
1
+ from transformers import Olmo2Model, Olmo2ForCausalLM, AutoTokenizer, logging
2
+ from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
3
+ from transformers.modeling_outputs import (
4
+ CausalLMOutputWithPast,
5
+ BaseModelOutputWithPast,
6
+ )
7
+ import numpy as np
8
+ import math
9
+ from torch import nn
10
+ import pandas as pd
11
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
12
+ from dataclasses import dataclass
13
+
14
+ # Olmo2
15
+ from transformers.models.olmo2.modeling_olmo2 import Olmo2RotaryEmbedding, Olmo2Attention, Olmo2MLP, Olmo2RMSNorm, apply_rotary_pos_emb, eager_attention_forward, Olmo2DecoderLayer
16
+ from transformers.models.olmo2.configuration_olmo2 import Olmo2Config
17
+ from transformers.processing_utils import Unpack
18
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
19
+ from transformers.utils import LossKwargs
20
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
21
+
22
+
23
+
24
+ import os
25
+ import sys
26
+ import json
27
+ import pdb
28
+ import torch.distributed as dist
29
+ from tqdm import tqdm
30
+ from torch.utils.data.distributed import DistributedSampler
31
+ import transformers
32
+ import pickle
33
+ from dataset import *
34
+ from peft import (get_peft_model, PeftModel)
35
+ import random
36
+ from config import *
37
+ from datasets import Dataset, DatasetDict, load_dataset
38
+ import wandb
39
+ import argparse
40
+ import torch
41
+ import torch.nn as nn
42
+ import torch.nn.functional as F
43
+ import torch.optim as optim
44
+ import functools
45
+ from torch.optim.lr_scheduler import StepLR
46
+ import torch.nn.functional as F
47
+ import torch.distributed as dist
48
+ import torch.multiprocessing as mp
49
+ from torch.nn.parallel import DistributedDataParallel as DDP
50
+ from torch.utils.data.distributed import DistributedSampler
51
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
52
+ checkpoint_wrapper, CheckpointImpl)
53
+ from torch.distributed.fsdp import (
54
+ FullyShardedDataParallel as FSDP,
55
+ MixedPrecision,
56
+ BackwardPrefetch,
57
+ ShardingStrategy,
58
+ FullStateDictConfig,
59
+ StateDictType,
60
+ )
61
+ from torch.distributed.fsdp.wrap import (
62
+ transformer_auto_wrap_policy,
63
+ enable_wrap,
64
+ wrap,
65
+ )
66
+ from functools import partial
67
+ from torch.utils.data import DataLoader
68
+ from pathlib import Path
69
+ from typing import Type, List, Optional, Tuple, Union, Callable, Dict, Any
70
+
71
+
72
+ ############ specifically for generate() #################
73
+ import inspect
74
+ from transformers.generation.configuration_utils import (
75
+ NEED_SETUP_CACHE_CLASSES_MAPPING,
76
+ QUANT_BACKEND_CLASSES_MAPPING,
77
+ GenerationConfig,
78
+ GenerationMode,
79
+ )
80
+ from transformers.generation.logits_process import LogitsProcessorList
81
+ from transformers.generation.stopping_criteria import StoppingCriteriaList
82
+ from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
83
+ from transformers.integrations.fsdp import is_fsdp_managed_module
84
+ import warnings  # `warnings.warn` is used in generate() below but `warnings` was never explicitly imported
85
+ from transformers.generation.utils import (
86
+ is_torchdynamo_compiling, ModelOutput, GenerateDecoderOnlyOutput,
87
+ GenerateEncoderDecoderOutput, GenerateBeamDecoderOnlyOutput,
88
+ GenerateBeamEncoderDecoderOutput, GreedySearchDecoderOnlyOutput,
89
+ ContrastiveSearchDecoderOnlyOutput, SampleDecoderOnlyOutput,
90
+ ContrastiveSearchEncoderDecoderOutput, GreedySearchEncoderDecoderOutput,
91
+ SampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput,
92
+ BeamSampleDecoderOnlyOutput, BeamSearchEncoderDecoderOutput,
93
+ BeamSampleEncoderDecoderOutput, GreedySearchOutput, SampleOutput,
94
+ BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput,
95
+ GenerateNonBeamOutput, GenerateBeamOutput, GenerateOutput)
96
+ from transformers.generation.stopping_criteria import (
97
+ ConfidenceCriteria,
98
+ EosTokenCriteria,
99
+ MaxLengthCriteria,
100
+ MaxTimeCriteria,
101
+ StoppingCriteria,
102
+ StoppingCriteriaList,
103
+ StopStringCriteria,
104
+ )
105
+
106
+ from transformers.generation.stopping_criteria import STOPPING_CRITERIA_INPUTS_DOCSTRING
107
+ from transformers.pytorch_utils import isin_mps_friendly
108
+ from transformers.utils import add_start_docstrings
109
+ logger = logging.get_logger(__name__)  # `logger` is used below (warning / warning_once) but was never defined
110
+
111
+ class EosTokenCriteriaForSemiNAT(StoppingCriteria):
112
+ """
113
+ This class can be used to stop generation whenever the "end-of-sequence" token is generated.
114
+ By default, it uses the `model.generation_config.eos_token_id`.
115
+
116
+ Args:
117
+ eos_token_id (`Union[int, List[int], torch.Tensor]`):
118
+ The id(s) of the *end-of-sequence* token.
119
+ """
120
+
121
+ def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor]):
122
+ if not isinstance(eos_token_id, torch.Tensor):
123
+ if isinstance(eos_token_id, int):
124
+ eos_token_id = [eos_token_id]
125
+ eos_token_id = torch.tensor(eos_token_id)
126
+ self.eos_token_id = eos_token_id
127
+
128
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
129
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, last_k: int, **kwargs) -> torch.BoolTensor:
130
+ # pdb.set_trace()
131
+ # if torch.any(input_ids == 100257):
132
+ # pdb.set_trace()
133
+ self.eos_token_id = self.eos_token_id.to(input_ids.device)
134
+ token_is_eos = isin_mps_friendly(input_ids[:, -last_k:], self.eos_token_id)
135
+ is_done = torch.any(token_is_eos, dim=1)
136
+ return is_done
137
+
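+ # [editor's note] The short self-test below was added during editing for illustration and is not
+ # part of the original upload; the `_demo_` name and the token ids are hypothetical. It shows how
+ # this criterion differs from the stock `EosTokenCriteria`: it scans the last `last_k` generated
+ # tokens (the whole chunk emitted in one semi-NAT step) instead of only the final token.
+ def _demo_eos_token_criteria_for_seminat():
+     crit = EosTokenCriteriaForSemiNAT(eos_token_id=2)
+     ids = torch.tensor([[5, 7, 2, 9]])                  # EOS (id 2) sits inside the last chunk
+     assert crit(ids, scores=None, last_k=3).all()       # chunk of 3 tokens contains EOS -> stop
+     assert not crit(ids, scores=None, last_k=1).any()   # only the final token checked -> keep going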
138
+
139
+
140
+ ############ specifically for generate() #################
141
+
142
+
143
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
144
+
145
+
146
+ @dataclass
147
+ class ModelOutputWithPastForSemiNAT(BaseModelOutputWithPast):
148
+
149
+ chunk_hidden_state: torch.FloatTensor = None
150
+ length_ground_truth: Optional[torch.FloatTensor] = None
151
+ length_logits: Optional[torch.FloatTensor] = None
152
+ position_embeddings: Optional[torch.FloatTensor] = None # ?
153
+ nar_hidden_state: torch.FloatTensor = None # ?
154
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
155
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
156
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
157
+
158
+
159
+
160
+
161
+ class TwoLayerMLP(nn.Module):
162
+ def __init__(self, hidden_size: int, dropout_rate: float = 0.1):
163
+ """
164
+ Initialize a two-layer MLP that supports arbitrary leading batch dimensions.
165
+
166
+ Args:
167
+ hidden_size (int): hidden dimension
168
+ dropout_rate (float): dropout rate, defaults to 0.1
169
+ """
170
+ super().__init__()
171
+
172
+ self.fc1 = nn.Linear(hidden_size, 4 * hidden_size) # first layer expands the dimension by 4x
173
+ self.fc2 = nn.Linear(4 * hidden_size, hidden_size) # second layer projects it back
174
+ self.dropout = nn.Dropout(p=dropout_rate)
175
+ self.activation = nn.GELU() # GELU activation
176
+
177
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
178
+ """
179
+ Forward pass that supports arbitrary leading batch dimensions.
180
+
181
+ Args:
182
+ x (torch.Tensor): input tensor of shape (..., hidden_size); any leading dimensions are allowed
183
+
184
+ Returns:
185
+ torch.Tensor: output tensor with the same shape as the input
186
+ """
187
+ # remember the original shape
188
+ original_shape = x.shape
189
+ hidden_size = original_shape[-1]
190
+
191
+ # reshape the input to 2D: (batch_size, hidden_size), where batch_size folds in all leading dimensions
192
+ x_2d = x.view(-1, hidden_size)
193
+
194
+ # pdb.set_trace()
195
+ # first layer: linear -> activation -> dropout
196
+ x_2d = self.fc1(x_2d)
197
+ x_2d = self.activation(x_2d)
198
+ x_2d = self.dropout(x_2d)
199
+
200
+ # second layer: linear projection
201
+ x_2d = self.fc2(x_2d)
202
+ # pdb.set_trace()
203
+ # restore the original shape
204
+ x = x_2d.view(*original_shape)
205
+ # pdb.set_trace()
206
+ return x
207
+
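+ # [editor's note] Illustrative self-test added during editing; not part of the original upload and
+ # the `_demo_` name is hypothetical. TwoLayerMLP flattens every leading dimension, applies
+ # Linear(h -> 4h) -> GELU -> Dropout -> Linear(4h -> h), and restores the original shape, so it can
+ # be applied to tensors with any batch/sequence layout.
+ def _demo_two_layer_mlp():
+     mlp = TwoLayerMLP(hidden_size=64)      # small hidden size, chosen only for the demo
+     x = torch.randn(2, 5, 3, 64)           # arbitrary leading dimensions
+     assert mlp(x).shape == x.shape         # trailing (hidden) dim and leading dims are preserved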
208
+
209
+
210
+
211
+
212
+
213
+
214
+
215
+
216
+
217
+
218
+
219
+
220
+
221
+
222
+
223
+
224
+ class Olmo2AttentionForSemiNAT(nn.Module):
225
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
226
+
227
+ def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None, is_causal: bool = True):
228
+ super().__init__()
229
+ self.config = config
230
+ self.layer_idx = layer_idx
231
+ self.head_dim = getattr(
232
+ config, "head_dim",
233
+ config.hidden_size // config.num_attention_heads)
234
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
235
+ self.scaling = self.head_dim**-0.5
236
+ self.attention_dropout = config.attention_dropout
237
+ self.is_causal = is_causal
238
+
239
+ self.q_proj = nn.Linear(config.hidden_size,
240
+ config.num_attention_heads * self.head_dim,
241
+ bias=config.attention_bias)
242
+ self.k_proj = nn.Linear(config.hidden_size,
243
+ config.num_key_value_heads * self.head_dim,
244
+ bias=config.attention_bias)
245
+ self.v_proj = nn.Linear(config.hidden_size,
246
+ config.num_key_value_heads * self.head_dim,
247
+ bias=config.attention_bias)
248
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim,
249
+ config.hidden_size,
250
+ bias=config.attention_bias)
251
+ self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim,
252
+ config.rms_norm_eps)
253
+ self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim,
254
+ config.rms_norm_eps)
255
+
256
+ def forward(
257
+ self,
258
+ hidden_states: torch.Tensor,
259
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
260
+ attention_mask: Optional[torch.Tensor],
261
+ past_key_value: Optional[Cache] = None,
262
+ cache_position: Optional[torch.LongTensor] = None,
263
+ **kwargs,
264
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
265
+ Optional[Tuple[torch.Tensor]]]:
266
+ input_shape = hidden_states.shape[:-1]
267
+ hidden_shape = (*input_shape, -1, self.head_dim)
268
+
269
+ query_states = self.q_norm(self.q_proj(hidden_states))
270
+ key_states = self.k_norm(self.k_proj(hidden_states))
271
+ value_states = self.v_proj(hidden_states)
272
+
273
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
274
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
275
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
276
+
277
+
278
+
279
+ if position_embeddings is not None:
280
+ cos, sin = position_embeddings
281
+ query_states, key_states = apply_rotary_pos_emb(
282
+ query_states, key_states, cos, sin)
283
+
284
+ if past_key_value is not None:
285
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
286
+ cache_kwargs = {
287
+ "sin": sin,
288
+ "cos": cos,
289
+ "cache_position": cache_position
290
+ }
291
+ key_states, value_states = past_key_value.update(
292
+ key_states, value_states, self.layer_idx, cache_kwargs)
293
+
294
+ attention_interface: Callable = eager_attention_forward
295
+
296
+ # default to eager attention (the implementation dispatch below is disabled)
297
+ # if self.config._attn_implementation != "eager":
298
+ # if self.config._attn_implementation == "sdpa" and kwargs.get(
299
+ # "output_attentions", False):
300
+ # logger.warning_once(
301
+ # "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
302
+ # 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
303
+ # )
304
+ # else:
305
+ # attention_interface = ALL_ATTENTION_FUNCTIONS[
306
+ # self.config._attn_implementation]
307
+
308
+ # pdb.set_trace()
309
+ attn_output, attn_weights = attention_interface(
310
+ self,
311
+ query_states,
312
+ key_states,
313
+ value_states,
314
+ attention_mask,
315
+ dropout=0.0 if not self.training else self.attention_dropout,
316
+ scaling=self.scaling,
317
+ **kwargs,
318
+ )
319
+
320
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
321
+ attn_output = self.o_proj(attn_output)
322
+ return attn_output, attn_weights
323
+
324
+
325
+
326
+ class Olmo2DecoderLayerForSemiNAT(nn.Module):
327
+
328
+ def __init__(
329
+ self,
330
+ config: Olmo2Config,
331
+ layer_idx: int,
332
+ is_causal: bool = True,
333
+ ):
334
+ super().__init__()
335
+ self.hidden_size = config.hidden_size
336
+ # pdb.set_trace()
337
+ self.self_attn = Olmo2AttentionForSemiNAT(config=config,
338
+ layer_idx=layer_idx,
339
+ is_causal=is_causal)
340
+ self.mlp = Olmo2MLP(config)
341
+ self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size,
342
+ eps=config.rms_norm_eps)
343
+ self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size,
344
+ eps=config.rms_norm_eps)
345
+
346
+ # pdb.set_trace()
347
+
348
+ def forward(
349
+ self,
350
+ hidden_states: torch.Tensor,
351
+ attention_mask: Optional[torch.Tensor] = None,
352
+ position_ids: Optional[torch.LongTensor] = None,
353
+ past_key_value: Optional[Cache] = None,
354
+ output_attentions: Optional[bool] = False,
355
+ use_cache: Optional[bool] = False,
356
+ cache_position: Optional[torch.LongTensor] = None,
357
+ position_embeddings: Optional[Tuple[torch.Tensor,
358
+ torch.Tensor]] = None,
359
+ **kwargs,
360
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
361
+ torch.FloatTensor]]]:
362
+ residual = hidden_states
363
+
364
+ # pdb.set_trace()
365
+ # Self Attention
366
+ hidden_states, self_attn_weights = self.self_attn(
367
+ hidden_states=hidden_states,
368
+ attention_mask=attention_mask,
369
+ position_ids=position_ids,
370
+ past_key_value=past_key_value,
371
+ output_attentions=output_attentions,
372
+ use_cache=use_cache,
373
+ cache_position=cache_position,
374
+ position_embeddings=position_embeddings,
375
+ **kwargs,
376
+ )
377
+ hidden_states = self.post_attention_layernorm(hidden_states)
378
+ hidden_states = residual + hidden_states
379
+
380
+ # Fully Connected
381
+ residual = hidden_states
382
+ hidden_states = self.mlp(hidden_states)
383
+ hidden_states = self.post_feedforward_layernorm(hidden_states)
384
+ hidden_states = residual + hidden_states
385
+
386
+ outputs = (hidden_states, )
387
+ if output_attentions:
388
+ outputs += (self_attn_weights, )
389
+
390
+ return outputs
391
+
392
+
393
+ class NATEncoderForSemiNAT(nn.Module):
394
+
395
+ def __init__(self, config: Olmo2Config, num_layer: int = 1):
396
+ super().__init__()
397
+ self.num_layer = num_layer
398
+ self.encoder_layers = nn.ModuleList([
399
+ Olmo2DecoderLayer(config, layer_idx)
400
+ for layer_idx in range(self.num_layer)
401
+ ])
402
+
403
+ def forward(
404
+ self,
405
+ hidden_states: torch.Tensor,
406
+ attention_mask: Optional[torch.Tensor] = None,
407
+ past_key_value: Optional[Cache] = None,
408
+ output_attentions: Optional[bool] = False,
409
+ use_cache: Optional[bool] = False,
410
+ cache_position: Optional[torch.LongTensor] = None,
411
+ position_embeddings: Optional[Tuple[torch.Tensor,
412
+ torch.Tensor]] = None,
413
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
414
+ torch.FloatTensor]]]:
415
+ # pdb.set_trace()
416
+ for layer in self.encoder_layers:
417
+ outputs = layer(hidden_states=hidden_states,
418
+ output_attentions=output_attentions,
419
+ position_embeddings=position_embeddings)
420
+ hidden_states = outputs[0]
421
+ # only the last layer attn_weights and present_key_value are stored
422
+ # mean pool the hidden states across sequence (chunk)
423
+ hidden_states = torch.mean(hidden_states, dim=1)
424
+ return hidden_states
425
+
426
+
427
+ class NATDecoderForSemiNAT(nn.Module):
428
+
429
+ def __init__(self, config: Olmo2Config, num_layer: int = 1):
430
+ super().__init__()
431
+ self.num_layer = num_layer
432
+ self.decoder_layers = nn.ModuleList([
433
+ Olmo2DecoderLayerForSemiNAT(config, layer_idx, False)
434
+ for layer_idx in range(self.num_layer)
435
+ ])
436
+
437
+ def forward(
438
+ self,
439
+ hidden_states: torch.Tensor,
440
+ attention_mask: Optional[torch.Tensor] = None,
441
+ past_key_value: Optional[Cache] = None,
442
+ output_attentions: Optional[bool] = False,
443
+ use_cache: Optional[bool] = False,
444
+ cache_position: Optional[torch.LongTensor] = None,
445
+ position_embeddings: Optional[Tuple[torch.Tensor,
446
+ torch.Tensor]] = None,
447
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
448
+ torch.FloatTensor]]]:
449
+
450
+ for layer in self.decoder_layers:
451
+ # pdb.set_trace()
452
+ outputs = layer(hidden_states=hidden_states,
453
+ attention_mask=attention_mask,
454
+ output_attentions=output_attentions,
455
+ position_embeddings=position_embeddings)
456
+ hidden_states = outputs[0]
457
+ return hidden_states
458
+
459
+
460
+ class Olmo2ModelForSemiNAT(Olmo2Model):
461
+
462
+ def __init__(self, config):
463
+ super().__init__(config)
464
+ self.layers = nn.ModuleList([
465
+ Olmo2DecoderLayer(config, layer_idx)
466
+ for layer_idx in range(config.num_hidden_layers)
467
+ ])
468
+
469
+ self.decoder = NATDecoderForSemiNAT(config, 1)
470
+ self.encoder = NATEncoderForSemiNAT(config, 1)
471
+
472
+
473
+ # pdb.set_trace()
474
+ self.chunk_size_limit = config.chunk_size_limit
475
+ self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
476
+ self.rotary_emb = Olmo2RotaryEmbedding(config=config)
477
+ self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
478
+ self.gradient_checkpointing = False
479
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
480
+ self.padding_idx)
481
+
482
+
483
+ self.length_predictor = nn.Linear(config.hidden_size,
484
+ self.chunk_size_limit)
485
+
486
+ self.linear_projection = TwoLayerMLP(config.hidden_size)
487
+
488
+
489
+ def forward(
490
+ self,
491
+ input_ids: torch.LongTensor = None,
492
+ attention_mask: Optional[torch.Tensor] = None,
493
+ position_ids: Optional[torch.LongTensor] = None,
494
+ slice_pos: torch.Tensor = None,
495
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
496
+ inputs_embeds: Optional[torch.FloatTensor] = None,
497
+ use_cache: Optional[bool] = None,
498
+ output_attentions: Optional[bool] = None,
499
+ output_hidden_states: Optional[bool] = None,
500
+ return_dict: Optional[bool] = None,
501
+ cache_position: Optional[torch.LongTensor] = None,
502
+ inference: Optional[bool] = None,
503
+ padding: Optional[torch.Tensor] = None,
504
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
505
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
506
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
507
+ output_hidden_states = (output_hidden_states
508
+ if output_hidden_states is not None else
509
+ self.config.output_hidden_states)
510
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
511
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
512
+
513
+ if (input_ids is None) ^ (inputs_embeds is not None):
514
+ raise ValueError(
515
+ "You must specify exactly one of input_ids or inputs_embeds")
516
+
517
+ if self.gradient_checkpointing and self.training and use_cache:
518
+ logger.warning_once(
519
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
520
+ )
521
+ use_cache = False
522
+
523
+ if inputs_embeds is None:
524
+ inputs_embeds = self.embed_tokens(input_ids)
525
+
526
+ if use_cache and past_key_values is None:
527
+ past_key_values = DynamicCache()
528
+
529
+ if cache_position is None:
530
+ past_seen_tokens = past_key_values.get_seq_length(
531
+ ) if past_key_values is not None else 0
532
+ cache_position = torch.arange(past_seen_tokens,
533
+ past_seen_tokens +
534
+ inputs_embeds.shape[1],
535
+ device=inputs_embeds.device)
536
+
537
+ if position_ids is None:
538
+ position_ids = cache_position.unsqueeze(0)
539
+
540
+ if inference is not None:
541
+ position_ids = cache_position.unsqueeze(0)
542
+
543
+ position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
544
+
545
+ all_hidden_states = () if output_hidden_states else None
546
+ all_self_attns = () if output_attentions else None
547
+ next_decoder_cache = None
548
+
549
+ # pdb.set_trace()
550
+
551
+ # initialize chunk inputs as embedding of [pad]
552
+ pad_token_id = padding
553
+ batch_size, seq_len, hidden_size = inputs_embeds.shape
554
+ pad_embedding = self.embed_tokens(
555
+ torch.tensor([pad_token_id]).to(inputs_embeds.device)) # 1, 2048
556
+ # pad_chunk_emb = self.encoder(
557
+ # pad_embedding.unsqueeze(0),
558
+ # attention_mask=None,
559
+ # position_embeddings=position_embeddings[:, :1, :],
560
+ # ) # 1 * 1 * hidden_size
561
+ chunk_inputs_embeds = pad_embedding.expand(
562
+ batch_size, seq_len, hidden_size).clone().to(
563
+ inputs_embeds.device) # bs * length * hidden_size, pre-filled with the [pad] embedding
564
+
565
+ # iterate over the batch and the sequence
566
+ length_ground_truth = []
567
+ chunk_attention_mask = []
568
+ chunk_labels = []
569
+ # max_chunk_num = 0
570
+ accumu_num = 0
571
+ slice_nums = []
572
+
573
+ # pdb.set_trace()
574
+ for b in range(batch_size):
575
+ slice_num = 0
576
+ start_position = 0
577
+ slice_length = []
578
+ for i in range(seq_len):
579
+ cut = slice_pos[b, i].item() # read the cut position
580
+ if cut == -1: # -1 means there is no cut here
581
+ pass
582
+ else:
583
+ cut += 1 # +1 so the cut falls right after this token
584
+ # try:
585
+ # print(f"start_position: {start_position}, cut: {cut}")
586
+ chunk_inputs_embeds[b, i] = self.encoder(
587
+ inputs_embeds[b, start_position:cut].unsqueeze(0),
588
+ position_embeddings=tuple(
589
+ tensor[0, 0:cut -
590
+ start_position, :].unsqueeze(0)
591
+ for tensor in position_embeddings))
592
+ # except:
593
+ # pdb.set_trace()
594
+ slice_num += 1
595
+ slice_length.append(cut - start_position)
596
+ if cut - start_position > 10 or cut - start_position < 0:
597
+ pdb.set_trace()
598
+ start_position = cut # advance the chunk start
599
+ slice_nums.append(slice_num) # number of chunks in this sample
600
+ # max_chunk_num = max(max_chunk_num, slice_num) # unused; rely on the accumulated chunk count instead
601
+ accumu_num += slice_num
602
+ chunk_attention_mask.append(
603
+ torch.tensor([1] * slice_num + [0] *
604
+ (seq_len - slice_num)).unsqueeze(
605
+ 0)) # 1 marks a real chunk slot, 0 marks padding
606
+ length_ground_truth.append(
607
+ torch.tensor(slice_length + [-100] *
608
+ (seq_len - slice_num)).unsqueeze(0)) # -100 marks padded (non-chunk) slots
609
+ accumu_num -= batch_size
610
+ # pdb.set_trace()
611
+
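+ # [editor's note] Added during editing (not part of the original upload): `slice_pos[b]` stores the
+ # (inclusive) index of the last token of each chunk, left-aligned and padded with -1. For a 7-token
+ # sequence with slice_pos[b] = [1, 4, 6, -1, -1, -1, -1], the loop above produces the chunks
+ # tokens[0:2], tokens[2:5], tokens[5:7], so slice_length = [2, 3, 2], slice_nums[b] = 3, and the
+ # pooled chunk embeddings are written into chunk_inputs_embeds[b, 0..2].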
612
+
613
+ chunk_attention_mask = torch.cat(chunk_attention_mask, dim=0).to(
614
+ inputs_embeds.device) # torch.Size([1, 256]) bs * length
615
+
616
+ length_ground_truth = torch.cat(length_ground_truth,
617
+ dim=0).to(inputs_embeds.device)
618
+
619
+ # only slice the first max_chunk_num chunks for each sample
620
+ # chunk_inputs_embeds = chunk_inputs_embeds[:, :max_chunk_num, :]
621
+ # chunk_attention_mask = chunk_attention_mask[:, :max_chunk_num]
622
+ # length_ground_truth = length_ground_truth[:max_chunk_num]
623
+
624
+ chunk_cache_position = cache_position
625
+ chunk_position_embeddings = self.rotary_emb(
626
+ chunk_inputs_embeds, position_ids
627
+ ) # tuple; the first element has shape torch.Size([1, 256, 128]); the last dim is hidden_size / num_heads, with 64 dims each for cos and sin
628
+
629
+ hidden_states = chunk_inputs_embeds # bs * max_chunk_num * hidden_size
630
+
631
+ # pdb.set_trace()
632
+
633
+ if inference is not None:
634
+ # at inference time, drop the padded chunk slots
635
+ mask_bool = chunk_attention_mask.bool()
636
+ chunk_inputs_embeds = chunk_inputs_embeds[mask_bool.unsqueeze(
637
+ -1).expand_as(chunk_inputs_embeds)].view(
638
+ chunk_inputs_embeds.size(0), -1,
639
+ chunk_inputs_embeds.size(2))
640
+ chunk_attention_mask = chunk_attention_mask[mask_bool].view(
641
+ chunk_attention_mask.size(0), -1)
642
+
643
+ # pdb.set_trace()
644
+ chunk_inputs_embeds = chunk_inputs_embeds[:,
645
+ chunk_cache_position, :]
646
+ chunk_attention_mask = chunk_attention_mask[:,
647
+ chunk_cache_position]
648
+
649
+ hidden_states = chunk_inputs_embeds
650
+ # pdb.set_trace()
651
+
652
+
653
+ causal_mask = self._update_causal_mask(chunk_attention_mask,
654
+ chunk_inputs_embeds,
655
+ chunk_cache_position,
656
+ past_key_values,
657
+ output_attentions)
658
+
659
+ # pdb.set_trace()
660
+ for decoder_layer in self.layers:
661
+ if output_hidden_states:
662
+ all_hidden_states += (hidden_states, )
663
+ if self.gradient_checkpointing and self.training:
664
+ layer_outputs = self._gradient_checkpointing_func(
665
+ decoder_layer.__call__,
666
+ hidden_states,
667
+ causal_mask,
668
+ position_ids,
669
+ past_key_values,
670
+ output_attentions,
671
+ use_cache,
672
+ cache_position,
673
+ chunk_position_embeddings,
674
+ )
675
+ else:
676
+ layer_outputs = decoder_layer(
677
+ hidden_states,
678
+ attention_mask=causal_mask,
679
+ position_ids=position_ids,
680
+ past_key_value=past_key_values,
681
+ output_attentions=output_attentions,
682
+ use_cache=use_cache,
683
+ cache_position=cache_position,
684
+ position_embeddings=chunk_position_embeddings,
685
+ **flash_attn_kwargs,
686
+ )
687
+
688
+ hidden_states = layer_outputs[0]
689
+
690
+ if output_attentions:
691
+ all_self_attns += (layer_outputs[1], )
692
+
693
+ # pdb.set_trace()
694
+ # add hidden states from the last decoder layer
695
+ if output_hidden_states:
696
+ all_hidden_states += (hidden_states, )
697
+
698
+ hidden_states = self.norm(
699
+ hidden_states) # bs * max_chunk_num * hidden_size, hidden states of all chunks
700
+
701
+ # pdb.set_trace()
702
+
703
+ # length-prediction loss
704
+ self.length_predictor = self.length_predictor.to(
705
+ hidden_states.device).to(hidden_states.dtype) # force the runtime dtype (bf16, since training used that)
706
+ length_logits = self.length_predictor(
707
+ hidden_states.to(
708
+ hidden_states.device)) # bs * length * chunk_size_limit
709
+
710
+ # pdb.set_trace()
711
+
712
+ next_cache = next_decoder_cache if use_cache else None # DynamicCache()
713
+ # if return_legacy_cache:
714
+ # next_cache = next_cache.to_legacy_cache()
715
+
716
+ nar_hidden_states = None
717
+ if inference is None:
718
+ # NAR decoder
719
+ bs, length, hidden_size = hidden_states.size()
720
+ # assert length == max_chunk_num # TODO: remove this
721
+
722
+ # shape: (bs * max_chunk_num) * chunk_size_limit * hidden_size
723
+ try:
724
+ nat_input_embeddings = torch.zeros(
725
+ accumu_num, self.chunk_size_limit,
726
+ hidden_size).to(hidden_states.device).to(hidden_states.dtype)
727
+ except:
728
+ pdb.set_trace()
729
+ nat_attention_mask = torch.zeros(
730
+ accumu_num, self.chunk_size_limit).to(hidden_states.device).to(
731
+ hidden_states.dtype)
732
+ tot_chunk_num = 0
733
+
734
+ # pdb.set_trace()
735
+ for b in range(bs):
736
+ for i in range(slice_nums[b]):
737
+ # slice_nums[b] is the number of chunks in sample b
738
+ # length_ground_truth[b] holds the true length of each chunk in sample b
739
+ # copy the chunk hidden state length_ground_truth times into nat_input_embeddings
740
+
741
+ if length_ground_truth[b, i + 1] != -100:
742
+ # pdb.set_trace()
743
+ nat_input_embeddings[
744
+ tot_chunk_num, :length_ground_truth[
745
+ b, i +
746
+ 1], :] = hidden_states[b, i:i + 1, :].expand(
747
+ length_ground_truth[b, i + 1], hidden_size)
748
+ nat_attention_mask[tot_chunk_num, :length_ground_truth[
749
+ b, i + 1]] = torch.tensor(
750
+ [1] * length_ground_truth[b, i + 1])
751
+ tot_chunk_num += 1
752
+ # pdb.set_trace()
753
+ else:
754
+ break
755
+
756
+ # pdb.set_trace()
757
+ nar_chunk_position = torch.arange(
758
+ 0, self.chunk_size_limit).unsqueeze(0).repeat(
759
+ accumu_num,
760
+ 1).to(hidden_states.device) # bs * max_chunk_num
761
+
762
+ nar_position_embeddings = self.rotary_emb(nat_attention_mask,
763
+ nar_chunk_position)
764
+
765
+
766
+ nat_input_embeddings = self.pos_encoder(nat_input_embeddings) # add absolute positional encodings
767
+
768
+
769
+ self.decoder = self.decoder.to(dtype=nat_input_embeddings.dtype)
770
+
771
+
772
+ # build the NAR attention mask
773
+ mask_nat_attention_mask = self.nat_prepare_4d_full_attention_mask_without_causal(
774
+ attention_mask=nat_attention_mask,
775
+ dtype=nat_attention_mask.dtype,
776
+ device=nat_attention_mask.device)
777
+
778
+
779
+ # pdb.set_trace()
780
+ nar_hidden_states = self.decoder(
781
+ nat_input_embeddings,
782
+ attention_mask=mask_nat_attention_mask,
783
+ # attention_mask=None,
784
+ # position_embeddings=nar_position_embeddings,
785
+ position_embeddings=None, # use absolute positions only; do not pass rotary (relative) embeddings
786
+ output_attentions=output_attentions,
787
+ use_cache=use_cache,
788
+ cache_position=None,
789
+ )
790
+
791
+ nar_hidden_states = self.norm(
792
+ nar_hidden_states) # bs * max_chunk_num * hidden_size
793
+
794
+ # pdb.set_trace()
795
+
796
+ return ModelOutputWithPastForSemiNAT(
797
+ chunk_hidden_state=hidden_states,
798
+ length_ground_truth=length_ground_truth,
799
+ length_logits=length_logits,
800
+ position_embeddings=position_embeddings,
801
+ nar_hidden_state=nar_hidden_states,
802
+ past_key_values=next_cache,
803
+ hidden_states=all_hidden_states,
804
+ attentions=all_self_attns,
805
+ )
806
+ # @staticmethod
807
+ # def nat_prepare_4d_full_attention_mask_without_causal(
808
+ # self,
809
+ # attention_mask: torch.Tensor,
810
+ # dtype: torch.dtype,
811
+ # device: torch.device,
812
+ # ) -> torch.Tensor:
813
+ # """
814
+ # Build a non-causal full attention mask that only blocks padding tokens.
815
+
816
+ # Args:
817
+ # attention_mask (torch.Tensor): (batch_size, seq_len), 1 for valid tokens, 0 for padding.
818
+ # dtype (torch.dtype): dtype of the generated mask (usually torch.float32/bfloat16).
819
+ # device (torch.device): device the mask lives on.
820
+
821
+ # Returns:
822
+ # torch.Tensor: shape = (batch_size, 1, seq_len, seq_len); non-padding tokens all see each other, padding is blocked.
823
+ # """
824
+ # if attention_mask.dim() != 2:
825
+ # raise ValueError("Expected 2D attention_mask of shape (batch_size, seq_len)")
826
+
827
+ # batch_size, seq_len = attention_mask.shape
828
+ # attention_mask = attention_mask.to(dtype=torch.float32) # force float32 before the broadcasting logic
829
+ # attention_mask = attention_mask.to(device)
830
+
831
+ # # outer product: only keep positions where both query and key are valid (1 * 1 = 1)
832
+ # visible_mask = attention_mask[:, None, :, None] * attention_mask[:, None, None, :] # (bs, 1, seq_len, seq_len)
833
+
834
+ # # convert to an additive mask: visible positions -> 0.0, blocked positions -> -inf
835
+ # min_dtype = torch.finfo(dtype).min
836
+ # full_attention_mask = (1.0 - visible_mask) * min_dtype # valid positions are 0.0, everything else is -inf
837
+
838
+ # return full_attention_mask.to(dtype=dtype)
839
+
840
+
841
+ # def nat_prepare_4d_full_attention_mask_no_masking(
842
+ # self,
843
+ # attention_mask: torch.Tensor, # (bs, L), not used here
844
+ # dtype: torch.dtype, # torch.float32/bfloat16
845
+ # device: torch.device,
846
+ # mask_val: float = -1e4, # not used
847
+ # ) -> torch.Tensor:
848
+ # """
849
+ # Build a fully-visible attention mask, padding tokens included.
850
+ # - every query can attend to every key;
851
+ # - the additive mask is all zeros (nothing is blocked);
852
+ # returns shape = (bs, 1, L, L)
853
+ # """
854
+ # if attention_mask.dim() != 2:
855
+ # raise ValueError(
856
+ # "Expected 2-D attention_mask with shape (batch, seq_len)")
857
+
858
+ # bs, L = attention_mask.shape
859
+ # additive_mask = torch.zeros((bs, 1, L, L), dtype=dtype,
860
+ # device=device) # all zeros: everything is visible
861
+
862
+ # return additive_mask
863
+
864
+ def nat_prepare_4d_full_attention_mask_without_causal(
865
+ self,
866
+ attention_mask: torch.Tensor, # (bs, L) 1=real, 0=pad
867
+ dtype: torch.dtype, # torch.float32/bfloat16
868
+ device: torch.device,
869
+ mask_val: float = -1e4, # fill value for blocked positions in the additive mask
870
+ ) -> torch.Tensor:
871
+ """
872
+ - For rows whose query is a valid token (attention_mask==1):
873
+ only columns whose key is also valid are visible -> full mutual visibility
874
+ - For rows whose query is padding:
875
+ fall back to the causal lower triangle (j <= i) -> avoids an all -inf row
876
+ Returns an additive mask of shape (bs, 1, L, L)
877
+ """
878
+ if attention_mask.dim() != 2:
879
+ raise ValueError(
880
+ "Expected 2-D attention_mask with shape (batch, seq_len)"
881
+ )
882
+
883
+ bs, L = attention_mask.shape
884
+ attn_mask_f = attention_mask.to(device=device, dtype=torch.float32) # float32 for easy broadcasting
885
+
886
+ # ---------- (1) mutual visibility among valid tokens ----------
887
+ # valid2valid[b,i,j] = 1 ⇔ query_i and key_j are both real
888
+ valid2valid = attn_mask_f[:, :, None] * attn_mask_f[:, None, :] # (bs, L, L)
889
+
890
+ # ---------- (2) causal lower triangle for padding rows ----------
891
+ # lower_tri[i,j] = 1 ⇔ j ≤ i
892
+ lower_tri = torch.tril(torch.ones(L, L, device=device))
893
+ # query_is_pad: (bs, L, 1) 1=pad
894
+ query_is_pad = (1.0 - attn_mask_f)[:, :, None]
895
+ causal_part = query_is_pad * lower_tri # (bs, L, L)
896
+
897
+ # ---------- (3) combine the two parts ----------
898
+ visible = torch.clamp(valid2valid + causal_part, 0.0, 1.0) # (bs, L, L)
899
+
900
+ # ---------- (4) turn into an additive mask ----------
901
+ additive_mask = (1.0 - visible) * mask_val # 0->0, 1->mask_val
902
+ additive_mask = additive_mask[:, None, :, :] # (bs,1,L,L)
903
+
904
+ return additive_mask.to(dtype=dtype)
905
+
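+ # [editor's note] Added during editing (not part of the original upload): example of the mask built
+ # above for attention_mask = [1, 1, 0] and mask_val = -1e4. Per batch element the (L, L) additive
+ # mask is
+ #       key:   t0      t1      t2 (pad)
+ #   q t0      0.0     0.0     -1e4      # real queries see every real key (no causal restriction)
+ #   q t1      0.0     0.0     -1e4
+ #   q t2      0.0     0.0      0.0      # padding query falls back to the causal triangle j <= i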
906
+
907
+ class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM):
908
+
909
+ def __init__(self, config, *args, **kwargs):
910
+ super().__init__(config, *args, **kwargs)
911
+ self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
912
+ self.config = config
913
+ self.padding_idx = config.pad_token_id
914
+ self.vocab_size = config.vocab_size
915
+
916
+ self.chunk_size_limit = config.chunk_size_limit
917
+ self.model = Olmo2ModelForSemiNAT(config)
918
+ self.vocab_size = config.vocab_size
919
+ self.lm_head = nn.Linear(config.hidden_size,
920
+ config.vocab_size,
921
+ bias=False)
922
+
923
+ # Initialize weights and apply final processing
924
+ self.post_init()
925
+
926
+ def forward(
927
+ self,
928
+ input_ids: torch.LongTensor = None,
929
+ attention_mask: Optional[torch.Tensor] = None,
930
+ position_ids: Optional[torch.LongTensor] = None,
931
+ slice_pos: Optional[torch.Tensor] = None,
932
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
933
+ inputs_embeds: Optional[torch.FloatTensor] = None,
934
+ labels: Optional[torch.LongTensor] = None,
935
+ use_cache: Optional[bool] = None,
936
+ output_attentions: Optional[bool] = None,
937
+ output_hidden_states: Optional[bool] = None,
938
+ return_dict: Optional[bool] = None,
939
+ cache_position: Optional[torch.LongTensor] = None,
940
+ logits_to_keep: Union[int, torch.Tensor] = 0,
941
+ # padding: Optional[torch.Tensor] = None,
942
+ **kwargs: Unpack[KwargsForCausalLM],
943
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
944
+
945
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
946
+ output_hidden_states = (output_hidden_states
947
+ if output_hidden_states is not None else
948
+ self.config.output_hidden_states)
949
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
950
+
951
+ # pdb.set_trace()
952
+
953
+ if labels is not None:
954
+ outputs = self.model(
955
+ input_ids=input_ids, # bs * length
956
+ attention_mask=attention_mask, # bs * length
957
+ position_ids=position_ids,
958
+ slice_pos=slice_pos,
959
+ past_key_values=past_key_values,
960
+ inputs_embeds=inputs_embeds,
961
+ use_cache=use_cache,
962
+ output_attentions=output_attentions,
963
+ output_hidden_states=output_hidden_states,
964
+ return_dict=return_dict,
965
+ cache_position=cache_position,
966
+ padding=self.padding_idx,
967
+ **kwargs,
968
+ )
969
+ else:
970
+ outputs = self.model(
971
+ input_ids=input_ids, # bs * length
972
+ attention_mask=attention_mask, # bs * length
973
+ position_ids=position_ids,
974
+ slice_pos=slice_pos,
975
+ past_key_values=past_key_values,
976
+ inputs_embeds=inputs_embeds,
977
+ use_cache=use_cache,
978
+ output_attentions=output_attentions,
979
+ output_hidden_states=output_hidden_states,
980
+ return_dict=return_dict,
981
+ cache_position=cache_position,
982
+ padding=self.padding_idx,
983
+ inference=True,
984
+ )
985
+
986
+ chunk_hidden_states = outputs.chunk_hidden_state
987
+ bs, length, hidden_size = chunk_hidden_states.size()
988
+
989
+ ############################# loss computation, in two parts #############################
990
+ loss = None
991
+ loss1 = None
992
+ loss2 = None
993
+ ############################# Part 1: MLP length-prediction loss (output dimension is 10, i.e. chunk_size_limit) #############################
994
+
995
+ if labels is not None:
996
+
997
+ length_ground_truth = outputs.length_ground_truth
998
+ length_logits = outputs.length_logits
999
+
1000
+ new_length_ground_truth = torch.where(
1001
+ length_ground_truth != -100, # condition: not equal to -100
1002
+ length_ground_truth - 1, # if true, use labels - 1 (shift to a 0-based class index)
1003
+ length_ground_truth # otherwise keep the original value
1004
+ )
1005
+
1006
+ # pdb.set_trace()
1007
+
1008
+ shift_length_logits = length_logits[:, :-1, :]
1009
+ shift_new_length_ground_truth = new_length_ground_truth[:, 1:]
1010
+
1011
+ logits_flat = shift_length_logits.reshape(
1012
+ -1,
1013
+ self.chunk_size_limit) # reshape to [bs * length, chunk_size_limit]
1014
+ labels_flat = shift_new_length_ground_truth.reshape(
1015
+ -1) # [bs * length]
1016
+
1017
+ # softmax logits to get probability
1018
+ logits_flat = torch.nn.functional.softmax(logits_flat, dim=-1)
1019
+
1020
+ # Use an MSE loss: first compute the expected length as a probability-weighted sum over the logits (not an argmax), then take the MSE against the label
1021
+
1022
+ # pdb.set_trace()
1023
+ # compute the predicted (expected) length
1024
+ predicted_lengths = torch.sum(
1025
+ logits_flat * torch.arange(self.chunk_size_limit).to(
1026
+ chunk_hidden_states.device).to(chunk_hidden_states.dtype),
1027
+ dim=1)
1028
+ # mean squared error between the predicted and true lengths
1029
+
1030
+ loss1 = torch.mean((predicted_lengths[labels_flat != -100] -
1031
+ labels_flat[labels_flat != -100].float())**2)
1032
+
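+ # [editor's note] Worked example added during editing (not part of the original upload): with
+ # chunk_size_limit = 4 and softmax probabilities p = [0.1, 0.2, 0.6, 0.1] for one chunk slot, the
+ # expected length index is 0*0.1 + 1*0.2 + 2*0.6 + 3*0.1 = 1.7; if the true chunk length is 3
+ # (class index 2 after the -1 shift above), that slot contributes (1.7 - 2)^2 = 0.09 to loss1.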
1033
+ # pdb.set_trace()
1034
+
1035
+ nar_hidden_state = outputs.nar_hidden_state
1036
+
1037
+ ############################# Part 2: recover every token from the chunk hidden states and compute the loss against the ground truth #############################
1038
+
1039
+ nar_labels = torch.full(
1040
+ (nar_hidden_state.size(0), nar_hidden_state.size(1)),
1041
+ -100).to(nar_hidden_state.device) # bs * length
1042
+
1043
+ nar_labels = self.update_nar_labels(nar_labels, labels, slice_pos,
1044
+ length_ground_truth, input_ids,
1045
+ self.chunk_size_limit)
1046
+
1047
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1048
+ slice_indices = slice(-logits_to_keep, None) if isinstance(
1049
+ logits_to_keep, int) else logits_to_keep
1050
+ logits = self.lm_head(
1051
+ nar_hidden_state[:, slice_indices, :]) # 1* seq_len * 50304
1052
+ # logits = logits.float()
1053
+ # pdb.set_trace()
1054
+ # if labels is not None:
1055
+
1056
+
1057
+ loss2 = self.loss_function_seminat(
1058
+ logits,
1059
+ nar_labels,
1060
+ self.vocab_size,
1061
+ )
1062
+ # pdb.set_trace()
1063
+
1064
+ else: # for inference
1065
+ softmaxed = torch.softmax(outputs.length_logits[:, -1, :], dim=-1)
1066
+ length = torch.argmax(softmaxed, dim=-1).item() + 1
1067
+ # pdb.set_trace()
1068
+
1069
+ nat_input_embeddings = torch.zeros(
1070
+ 1, self.chunk_size_limit, hidden_size).to(input_ids.device).to(
1071
+ outputs.chunk_hidden_state.dtype)
1072
+ nat_attention_mask = torch.zeros(1, self.chunk_size_limit).to(
1073
+ input_ids.device).to(outputs.chunk_hidden_state.dtype)
1074
+
1075
+
1076
+ # pdb.set_trace()
1077
+
1078
+ nat_input_embeddings[:, :
1079
+ length, :] = outputs.chunk_hidden_state[:, -1, :].expand(
1080
+ length, -1).to(input_ids.device).to(
1081
+ outputs.chunk_hidden_state.dtype)
1082
+
1083
+ nat_attention_mask[:, :length] = torch.tensor([1] * length).to(
1084
+ input_ids.device).to(outputs.chunk_hidden_state.dtype)
1085
+
1086
+ nar_chunk_position = torch.arange(
1087
+ 0, self.chunk_size_limit).unsqueeze(0).to(input_ids.device).to(
1088
+ outputs.chunk_hidden_state.dtype) # bs * max_chunk_num
1089
+
1090
+ # nar_position_embeddings = self.pos_encoder(nat_attention_mask,
1091
+ # nar_chunk_position)
1092
+
1093
+ # pdb.set_trace()
1094
+ nat_input_embeddings = self.pos_encoder(nat_input_embeddings) # add absolute positional encodings
1095
+
1096
+ # pdb.set_trace()
1097
+ nar_hidden_states = self.model.decoder(
1098
+ nat_input_embeddings,
1099
+ # attention_mask=nat_attention_mask,
1100
+ attention_mask=None,
1101
+ # position_embeddings=nar_position_embeddings,
1102
+ position_embeddings=None,
1103
+ output_attentions=output_attentions,
1104
+ use_cache=False,
1105
+ cache_position=None,
1106
+ )
1107
+
1108
+ nar_hidden_states = self.model.norm(nar_hidden_states)
1109
+ # slice_indices = slice(-logits_to_keep, None) if isinstance(
1110
+ # logits_to_keep, int) else logits_to_keep
1111
+ logits = self.lm_head(
1112
+ nar_hidden_states[:, :, :])
1113
+ # pdb.set_trace()
1114
+ return CausalLMOutputWithPast(
1115
+ loss=(loss1, loss2),
1116
+ logits=logits,
1117
+ past_key_values=outputs.past_key_values,
1118
+ hidden_states=outputs.hidden_states,
1119
+ attentions=outputs.attentions,
1120
+ )
1121
+
1122
+ ############################# loss computation, in two parts #############################
1123
+
1124
+ # if not return_dict:
1125
+ # output = (logits, ) + outputs[1:]
1126
+ # if output_router_logits:
1127
+ # output = (aux_loss, ) + output
1128
+ # return (loss, ) + output if loss is not None else output
1129
+ # pdb.set_trace()
1130
+ return CausalLMOutputWithPast(
1131
+ loss=(loss1, loss2),
1132
+ logits=logits,
1133
+ past_key_values=outputs.past_key_values,
1134
+ hidden_states=outputs.hidden_states,
1135
+ attentions=outputs.attentions,
1136
+ )
1137
+
1138
+
1139
+
1140
+
1141
+
1142
+
1143
+ def update_nar_labels(self, nar_labels, labels, slice_pos,
1144
+ length_ground_truth, input_ids, chunk_size_limit):
1145
+ bs, length = input_ids.size()
1146
+ chunk = 0
1147
+ for b in range(bs):
1148
+ last_cut = slice_pos[b][0] # position of the first cut
1149
+ for i in range(1, length):
1150
+ if slice_pos[b, i] != -1:
1151
+ # pdb.set_trace()
1152
+ try:
1153
+ nar_labels[chunk, :length_ground_truth[b, i]] = labels[
1154
+ b, last_cut + 1:slice_pos[b, i] + 1]
1155
+ except:
1156
+ pdb.set_trace()
1157
+ last_cut = slice_pos[b, i]
1158
+ chunk += 1
1159
+ else:
1160
+ break
1161
+ # pdb.set_trace()
1162
+ return nar_labels
1163
+
1164
+ def fixed_cross_entropy(self,
1165
+ source,
1166
+ target,
1167
+ num_items_in_batch: int = None,
1168
+ ignore_index: int = -100,
1169
+ **kwargs):
1170
+ reduction = "sum" if num_items_in_batch is not None else "mean"
1171
+ loss = F.cross_entropy(source,
1172
+ target,
1173
+ ignore_index=ignore_index,
1174
+ reduction=reduction)
1175
+ if torch.isnan(loss):
1176
+ # print(f"Step {global_step}: loss is NaN, entering pdb …")
1177
+ pdb.set_trace()
1178
+ # pdb.set_trace()
1179
+ if reduction == "sum":
1180
+ loss = loss / num_items_in_batch
1181
+ return loss
1182
+
1183
+ def loss_function_seminat(self,
1184
+ logits,
1185
+ labels,
1186
+ vocab_size: int,
1187
+ num_items_in_batch: int = None,
1188
+ ignore_index: int = -100,
1189
+ **kwargs):
1190
+ # logits: (B, L, V)
1191
+ # labels: (B, L)
1192
+
1193
+
1194
+ logits = logits.float()
1195
+ labels = labels.to(logits.device)
1196
+
1197
+ # Flatten the tokens (no shift)
1198
+ logits = logits.view(-1, vocab_size) # (B*L, V)
1199
+ labels = labels.view(-1) # (B*L)
1200
+
1201
+ # Ensure device alignment
1202
+ labels = labels.to(logits.device)
1203
+
1204
+ # Compute loss
1205
+ loss = self.fixed_cross_entropy(logits, labels, num_items_in_batch,
1206
+ ignore_index, **kwargs)
1207
+ return loss
1208
+
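+ # [editor's note] Added during editing (not part of the original upload): unlike the standard
+ # causal-LM loss, logits and labels are NOT shifted by one position in loss_function_seminat,
+ # because the NAR decoder predicts every token of a chunk in place from the replicated chunk
+ # vector. A minimal pure-PyTorch equivalent (shapes are hypothetical):
+ #
+ #   logits = torch.randn(2, 4, 50304)                      # (B, L, V)
+ #   labels = torch.tensor([[7, 8, -100, -100],
+ #                          [3, -100, -100, -100]])         # -100 = padded slots, ignored
+ #   loss = F.cross_entropy(logits.view(-1, 50304), labels.view(-1), ignore_index=-100)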
1209
+ def generate(
1210
+ self,
1211
+ inputs: Optional[torch.Tensor] = None,
1212
+ generation_config: Optional[GenerationConfig] = None,
1213
+ logits_processor: Optional[LogitsProcessorList] = None,
1214
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1215
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor],
1216
+ List[int]]] = None,
1217
+ synced_gpus: Optional[bool] = None,
1218
+ assistant_model: Optional["PreTrainedModel"] = None,
1219
+ streamer: Optional["BaseStreamer"] = None,
1220
+ negative_prompt_ids: Optional[torch.Tensor] = None,
1221
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
1222
+ prefilling_length: int = 0,
1223
+ **kwargs,
1224
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1225
+
1226
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
1227
+ self._validate_model_class()
1228
+ tokenizer = kwargs.pop(
1229
+ "tokenizer",
1230
+ None) # Pull this out first, we only use it for stopping criteria
1231
+ assistant_tokenizer = kwargs.pop(
1232
+ "assistant_tokenizer", None) # only used for assisted generation
1233
+
1234
+ generation_config, model_kwargs = self._prepare_generation_config(
1235
+ generation_config, **kwargs)
1236
+
1237
+ # GenerationConfig {
1238
+ # "eos_token_id": 50279,
1239
+ # "max_length": 2048,
1240
+ # "pad_token_id": 1
1241
+ # }
1242
+
1243
+ self._validate_model_kwargs(model_kwargs.copy())
1244
+ self._validate_assistant(assistant_model, tokenizer,
1245
+ assistant_tokenizer)
1246
+
1247
+ # 2. Set generation parameters if not already defined
1248
+ # decide whether generation must be synchronized across GPUs (e.g. DeepSpeed ZeRO-3 or FSDP)
1249
+ if synced_gpus is None:
1250
+ synced_gpus = (
1251
+ is_deepspeed_zero3_enabled()
1252
+ or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
1253
+
1254
+ # initialize the logits processors and stopping criteria
1255
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList(
1256
+ ) # rules that modify the output logits (e.g. banning repeats, forcing specific tokens)
1257
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList(
1258
+ ) # conditions that stop generation (e.g. max length reached, EOS detected)
1259
+
1260
+ accepts_attention_mask = "attention_mask" in set(
1261
+ inspect.signature(self.forward).parameters.keys()) # True
1262
+ requires_attention_mask = "encoder_outputs" not in model_kwargs # True
1263
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask",
1264
+ None) is not None # False
1265
+
1266
+ # pdb.set_trace()
1267
+
1268
+ # 3. Define model inputs
1269
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
1270
+ inputs, generation_config.bos_token_id, model_kwargs)
1271
+ batch_size = inputs_tensor.shape[0]
1272
+
1273
+ # inputs_tensor bs * input_length; model_input_name:"input_ids";
1274
+
1275
+ device = inputs_tensor.device
1276
+ self._prepare_special_tokens(generation_config,
1277
+ kwargs_has_attention_mask,
1278
+ device=device)
1279
+
1280
+ # decoder-only models must use left-padding for batched generation.
1281
+ # used for batched generation
1282
+ if not self.config.is_encoder_decoder and not is_torchdynamo_compiling(
1283
+ ):
1284
+ # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
1285
+ # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.
1286
+ if (generation_config._pad_token_tensor is not None
1287
+ and batch_size > 1 and len(inputs_tensor.shape) == 2
1288
+ and torch.sum(inputs_tensor[:, -1] ==
1289
+ generation_config._pad_token_tensor) > 0):
1290
+ logger.warning(
1291
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
1292
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
1293
+ )
1294
+ # pdb.set_trace()
1295
+ # 4. Define other model kwargs
1296
+ # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
1297
+ # generating the first new token or not, and we only want to use the embeddings for the first new token)
1298
+ if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
1299
+ generation_config.use_cache = True
1300
+ # the cache is needed to tell whether we are generating the first new token; later tokens rely on it for speed
1302
+
1303
+ # build the attention mask
1303
+ if not kwargs_has_attention_mask and requires_attention_mask and accepts_attention_mask:
1304
+ model_kwargs[
1305
+ "attention_mask"] = self._prepare_attention_mask_for_generation(
1306
+ inputs_tensor, generation_config, model_kwargs)
1307
+
1308
+ # an attention mask was passed in; sanity-check it
1309
+ elif kwargs_has_attention_mask:
1310
+ # TODO (joao): generalize this check with other types of inputs
1311
+ if model_input_name == "input_ids" and len(
1312
+ model_kwargs["attention_mask"].shape) > 2:
1313
+ raise ValueError(
1314
+ "`attention_mask` passed to `generate` must be 2D.")
1315
+
1316
+ # encoder-decoder model setup
1317
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
1318
+ # if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
1319
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
1320
+ inputs_tensor, model_kwargs, model_input_name,
1321
+ generation_config)
1322
+
1323
+ # 5. Prepare `input_ids` which will be used for auto-regressive generation
1324
+ # encoder-decoder model
1325
+ if self.config.is_encoder_decoder:
1326
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
1327
+ batch_size=batch_size,
1328
+ model_input_name=model_input_name,
1329
+ model_kwargs=model_kwargs,
1330
+ decoder_start_token_id=generation_config.
1331
+ _decoder_start_token_tensor,
1332
+ device=inputs_tensor.device,
1333
+ )
1334
+ else:
1335
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop(
1336
+ "input_ids") # torch.Size([1, 25]) # torch.Size([1, 25])
1337
+
1338
+ # heal incomplete tokens
1339
+ if generation_config.token_healing:
1340
+ input_ids = self.heal_tokens(input_ids, tokenizer)
1341
+
1342
+ # streaming output
1343
+ if streamer is not None:
1344
+ streamer.put(input_ids.cpu())
1345
+
1346
+ # pdb.set_trace()
1347
+
1348
+ # 6. Prepare `max_length` depending on other stopping criteria.
1349
+ input_ids_length = input_ids.shape[-1]
1350
+ has_default_max_length = kwargs.get(
1351
+ "max_length") is None and generation_config.max_length is not None
1352
+ has_default_min_length = kwargs.get(
1353
+ "min_length") is None and generation_config.min_length is not None
1354
+ # min_length is 0
1356
+
1357
+ # finalize the generation-length config
1357
+ generation_config = self._prepare_generated_length(
1358
+ generation_config=generation_config,
1359
+ has_default_max_length=has_default_max_length,
1360
+ has_default_min_length=has_default_min_length,
1361
+ model_input_name=model_input_name, # "input_ids"
1362
+ inputs_tensor=inputs_tensor,
1363
+ input_ids_length=input_ids_length, # prompt length
1364
+ )
1365
+
1366
+ # If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
1367
+ # logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
1368
+ # dynamically overrides this value as it can need more than the last token logits
1369
+ if self._supports_logits_to_keep(
1370
+ ) and "logits_to_keep" not in model_kwargs:
1371
+ model_kwargs["logits_to_keep"] = 1
1372
+ # Keep only the last token's logits instead of logits for every position, which greatly reduces memory. With a beam width of 5, assisted decoding overrides this to logits_to_keep=5 so several candidate tokens keep their logits for multi-path exploration.
1373
+
1374
+ # validate the generation length
1375
+ self._validate_generated_length(generation_config, input_ids_length,
1376
+ has_default_max_length)
1377
+
1378
+ # 7. Prepare the cache.
1379
+ # - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
1380
+ # - different models have a different cache name expected by the model (default = "past_key_values")
1381
+ # - `max_length`, prepared above, is used to determine the maximum cache length
1382
+ max_cache_length = generation_config.max_length - 1 # cache at most max_length - 1 tokens
1383
+
1384
+ # if the input is an embedding (inputs_embeds)
1385
+ if (inputs_tensor.shape[1] != input_ids_length
1386
+ and model_input_name == "inputs_embeds"
1387
+ and not self.config.is_encoder_decoder):
1388
+ max_cache_length += inputs_tensor.shape[1]
1389
+ self._prepare_cache_for_generation(generation_config, model_kwargs,
1390
+ assistant_model, batch_size,
1391
+ max_cache_length, device)
1392
+
1393
+ # 8. determine generation mode
1394
+ generation_mode = generation_config.get_generation_mode(
1395
+ assistant_model) # assisted decoding
1396
+
1397
+ if streamer is not None and (generation_config.num_beams > 1):
1398
+ raise ValueError(
1399
+ "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
1400
+ )
1401
+
1402
+ # device check
1403
+ if not is_torchdynamo_compiling(
1404
+ ) and self.device.type != input_ids.device.type:
1405
+ warnings.warn(
1406
+ "You are calling .generate() with the `input_ids` being on a device type different"
1407
+ f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
1408
+ f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
1409
+ " Please make sure that you have put `input_ids` to the"
1410
+ f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
1411
+ " running `.generate()`.",
1412
+ UserWarning,
1413
+ )
1414
+
1415
+ # pdb.set_trace()
1416
+
1417
+ # 9. prepare logits processors and stopping criteria
1418
+ prepared_logits_processor = self._get_logits_processor(
1419
+ generation_config=generation_config,
1420
+ input_ids_seq_length=input_ids_length,
1421
+ encoder_input_ids=inputs_tensor,
1422
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1423
+ logits_processor=logits_processor,
1424
+ device=inputs_tensor.device,
1425
+ model_kwargs=model_kwargs,
1426
+ negative_prompt_ids=negative_prompt_ids,
1427
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
1428
+ )
1429
+ prepared_stopping_criteria = self._get_stopping_criteria_for_seminat(
1430
+ generation_config=generation_config,
1431
+ stopping_criteria=stopping_criteria,
1432
+ tokenizer=tokenizer,
1433
+ **kwargs)
1434
+
1435
+ # Set model_kwargs `use_cache` so we can use it later in forward runs
1436
+ model_kwargs["use_cache"] = generation_config.use_cache
1437
+
1438
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
1439
+ input_ids=input_ids,
1440
+ expand_size=generation_config.num_return_sequences, # 1
1441
+ is_encoder_decoder=self.config.is_encoder_decoder, # false
1442
+ **model_kwargs,
1443
+ )
1444
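+ # Illustrative note (editor's addition, assumed values): _expand_inputs_for_generation repeats each row of
+ # input_ids (and the matching entries in model_kwargs) expand_size times, so num_return_sequences=2 on a
+ # batch of 3 prompts would yield 6 rows; with num_return_sequences=1, as annotated above, it is a no-op.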
+
+
+ # pdb.set_trace()
+ result = self._sampleforseminat(
+ input_ids,
+ logits_processor=prepared_logits_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ generation_config=generation_config,
+ synced_gpus=synced_gpus,
+ streamer=streamer,
+ prefilling_length=prefilling_length,
+ **model_kwargs,
+ )
+
+ # Convert to legacy cache format if requested
+ if (generation_config.return_legacy_cache is True
+ and not is_torchdynamo_compiling()
+ and hasattr(result, "past_key_values") and getattr(
+ result.past_key_values, "to_legacy_cache", None) is not None):
+ result.past_key_values = result.past_key_values.to_legacy_cache()
+ return result
+
+ def _get_stopping_criteria_for_seminat(
+ self,
+ generation_config: GenerationConfig,
+ stopping_criteria: Optional[StoppingCriteriaList],
+ tokenizer: Optional["PreTrainedTokenizerBase"] = None,
+ **kwargs,
+ ) -> StoppingCriteriaList:
+ criteria = StoppingCriteriaList()
+ if generation_config.max_length is not None:
+ max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
+ criteria.append(
+ MaxLengthCriteria(
+ max_length=generation_config.max_length,
+ max_position_embeddings=max_position_embeddings,
+ )
+ )
+ if generation_config.max_time is not None:
+ criteria.append(MaxTimeCriteria(max_time=generation_config.max_time))
+ if generation_config.stop_strings is not None:
+ if tokenizer is None:
+ raise ValueError(
+ "There are one or more stop strings, either in the arguments to `generate` or in the "
+ "model's generation config, but we could not locate a tokenizer. When generating with "
+ "stop strings, you must pass the model's tokenizer to the `tokenizer` argument of `generate`."
+ )
+ criteria.append(StopStringCriteria(stop_strings=generation_config.stop_strings, tokenizer=tokenizer))
+ if generation_config._eos_token_tensor is not None:
+ criteria.append(EosTokenCriteriaForSemiNAT(eos_token_id=generation_config._eos_token_tensor))
+ if (
+ generation_config.is_assistant
+ and generation_config.assistant_confidence_threshold is not None
+ and generation_config.assistant_confidence_threshold > 0
+ ):
+ criteria.append(
+ ConfidenceCriteria(assistant_confidence_threshold=generation_config.assistant_confidence_threshold)
+ )
+ criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
+ return criteria
+
+
+ def _sampleforseminat(
+ self,
+ input_ids: torch.LongTensor,
+ logits_processor: LogitsProcessorList,
+ stopping_criteria: StoppingCriteriaList,
+ generation_config: GenerationConfig,
+ synced_gpus: bool,
+ streamer: Optional["BaseStreamer"],
+ prefilling_length: int,
+ **model_kwargs,
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+
+ # init values
+ pad_token_id = generation_config._pad_token_tensor # padding token id
+ output_attentions = generation_config.output_attentions # whether to output attention weights
+ output_hidden_states = generation_config.output_hidden_states # whether to output hidden states
+ output_scores = generation_config.output_scores # whether to output scores
+ output_logits = generation_config.output_logits # whether to output raw logits
+ return_dict_in_generate = generation_config.return_dict_in_generate # whether to return a structured output
+ max_length = generation_config.max_length # maximum generation length
+ has_eos_stopping_criteria = any(
+ hasattr(criteria, "eos_token_id")
+ for criteria in stopping_criteria) # check whether any stopping criterion tracks an EOS token
+ do_sample = generation_config.do_sample # whether to sample instead of decoding greedily
+
+ # initialize containers for collected outputs
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate
+ and output_logits) else None
+ decoder_attentions = () if (return_dict_in_generate
+ and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate
+ and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate
+ and output_hidden_states) else None
+
+ # # encoder-decoder special handling; not relevant here
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get(
+ "attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states")
+ if output_hidden_states else None)
+
+ # pdb.set_trace()
+
+ # initialize sequence tracking
+ # keep track of which sequences are already finished
+ batch_size, cur_len = input_ids.shape
+ this_peer_finished = False
+ unfinished_sequences = torch.ones(
+ batch_size, dtype=torch.long,
+ device=input_ids.device) # marks unfinished sequences, e.g. torch.Size([1]) for batch size 1
+ model_kwargs = self._get_initial_cache_position(
+ input_ids, model_kwargs) # initialize the cache position
+
+ model_forward = self.__call__ # forward function used in the decoding loop
+ ############ switch to the new forward
+ # model_forward = self.forward
+
+ if isinstance(model_kwargs.get("past_key_values"), Cache):
+ is_compileable = model_kwargs[
+ "past_key_values"].is_compileable and self._supports_static_cache # compile optimization
+ is_compileable = is_compileable and not self.generation_config.disable_compile
+ if is_compileable and (
+ self.device.type == "cuda"
+ or generation_config.compile_config._compile_all_devices):
+ os.environ["TOKENIZERS_PARALLELISM"] = "0"
+ model_forward = self.get_compiled_call(
+ generation_config.compile_config)
+
+ ############ prefilling ############
+ start = prefilling_length - 1
+ chunk_length = prefilling_length
+
+ s_pos = [start]
+ while True:
+ start += chunk_length
+ if start >= input_ids.shape[1] - 1:
+ s_pos.append(input_ids.shape[1] - 1)
+ break
+ else:
+ s_pos.append(start)
+
+ # pdb.set_trace()
+ slice_pos = torch.tensor(s_pos + [-1] *
+ (max_length - len(s_pos))).unsqueeze(0).to(
+ input_ids.device)
+
+ model_kwargs['slice_pos'] = slice_pos
+ count = (slice_pos != -1).sum().item()
+ new_cache_position = torch.arange(0, count).to(input_ids.device)
+ model_kwargs[
+ 'cache_position'] = new_cache_position # update the cache position
+
+ # pdb.set_trace()
+ ############ prefilling ############
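+ # Worked example (editor's addition, assumed values): with prefilling_length=4 and a prompt of 11 tokens,
+ # the loop visits positions 3 and 7 and then clamps to the last index 10, so s_pos = [3, 7, 10]. slice_pos
+ # is that list right-padded with -1 up to max_length (shape [1, max_length]), and cache_position becomes
+ # [0, 1, 2], which suggests the cache is indexed per chunk boundary rather than per prompt token.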
+
+ is_prefill = True
+ while self._has_unfinished_sequences(
+ this_peer_finished,
+ synced_gpus,
+ device=input_ids.device,
+ cur_len=cur_len,
+ max_length=max_length): # loop until every sequence is finished
+ # prepare model inputs
+
+ # pdb.set_trace()
+
+ # model_kwargs.keys(): dict_keys(['attention_mask', 'logits_to_keep', 'past_key_values', 'use_cache', 'cache_position', 'nar_kv_cache', 'slice_pos'])
+ model_inputs = self.prepare_inputs_for_generation( # adds position_ids and input_ids
+ input_ids, **model_kwargs
+ ) # dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
+ # pdb.set_trace()
+
+ # position_ids = torch.arange(
+ # input_ids.shape[1], device=input_ids.device).unsqueeze(0).to(input_ids.device)
+ # model_inputs.update({"position_ids": position_ids})
+
+ model_inputs.update({"input_ids": input_ids})
+
+ # prepare variable output controls (note: some models won't accept all output controls)
+ model_inputs.update({"output_attentions": output_attentions}
+ if output_attentions else {})
+ model_inputs.update({"output_hidden_states": output_hidden_states}
+ if output_hidden_states else {})
+
+ if is_prefill:
+ # pdb.set_trace()
+ # outputs = self(**model_inputs, return_dict=True)
+ # dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
+ outputs = self.forward(**model_inputs, return_dict=True)
+ is_prefill = False
+ else:
+ # pdb.set_trace()
+ outputs = model_forward(**model_inputs, return_dict=True)
+
+ # pdb.set_trace()
+
+ ################ seminat ###########################
+ # model_kwargs['slice_pos'] = outputs.slice_pos
+ ################ seminat ###########################
+
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
+ model_kwargs = self._update_model_kwargs_for_generation_for_seminat(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ num_new_tokens=outputs.logits.size(1))
+ if synced_gpus and this_peer_finished:
+ continue
+
+ # pdb.set_trace()
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
+ # (here the clone covers all positions of the newly generated chunk)
+
+ # next_token_logits = outputs.logits[:, -1, :].clone().float()
+ next_token_logits = outputs.logits[:, :, :].clone().float(
+ ) # logits for the k newly generated tokens
+
+ next_token_logits = next_token_logits.to(input_ids.device)
+
+ # pre-process distribution
+ next_token_scores = logits_processor(input_ids, next_token_logits)
+
+ # token selection
+ if do_sample:
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
+ # torch.multinomial only accepts 1-D/2-D inputs, so flatten the (batch, chunk) dimensions before
+ # sampling and restore the [batch, chunk] shape afterwards, matching the greedy branch below
+ next_tokens = torch.multinomial(
+ probs.view(-1, probs.size(-1)),
+ num_samples=1).view(probs.size(0), -1)
+ else:
+ next_tokens = torch.argmax(
+ next_token_scores,
+ dim=-1) # tensor([9281], device='cuda:0') token id
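+ # Illustrative shapes (editor's addition, assumed values): if the model emits a chunk of k=4 tokens per
+ # step, next_token_scores has shape [batch, 4, vocab_size]; taking argmax (or sampling) over the last dim
+ # yields a [batch, 4] block of token ids, which is appended to input_ids in a single concatenation below.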
+
+ # pdb.set_trace()
+ # update slice_pos
+ count = (model_kwargs['slice_pos'] != -1).sum().item()
+ model_kwargs['slice_pos'][:, count] = model_kwargs[
+ 'slice_pos'][:, count - 1] + outputs.logits.size(1)
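+ # Worked example (editor's addition, continuing the assumed values above): if slice_pos currently holds
+ # [3, 7, 10, -1, ...] (count = 3) and this step produced 4 new tokens, the entry at index 3 becomes
+ # 10 + 4 = 14, so slice_pos keeps recording the last position of every chunk, generated ones included.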
+
+ # pdb.set_trace()
+
+ # finished sentences should have their next token be a padding token
+ if has_eos_stopping_criteria:
+ # broadcast unfinished_sequences ([batch]) over the chunk dimension of next_tokens ([batch, k])
+ next_tokens = next_tokens * unfinished_sequences[:, None] + pad_token_id * (
+ 1 - unfinished_sequences[:, None]
+ ) # once a sequence has finished, unfinished_sequences is 0, so its remaining positions are filled with padding
+
+ # pdb.set_trace()
+ # update generated ids, model inputs, and length for next step
+ # input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+ input_ids = torch.cat([input_ids, next_tokens], dim=-1)
+ if streamer is not None:
+ streamer.put(next_tokens.cpu())
+
+ # update the finished status
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(
+ input_ids, scores, last_k=next_tokens.size(1))
+ this_peer_finished = unfinished_sequences.max() == 0
+ cur_len += outputs.logits.size(1) # advance the length by the number of newly generated tokens
+
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
+ del outputs
+
+ if streamer is not None:
+ streamer.end()
+
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ return GenerateEncoderDecoderOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateDecoderOnlyOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return input_ids
+
+ def _update_model_kwargs_for_generation_for_seminat(
+ self,
+ outputs: ModelOutput,
+ model_kwargs: Dict[str, Any],
+ is_encoder_decoder: bool = False,
+ num_new_tokens: int = 1,
+ ) -> Dict[str, Any]:
+ ALL_CACHE_NAMES = [
+ "past_key_values", # default
+ "cache_params", # mamba-based models
+ "state", # rwkv
+ "mems", # xlnet
+ "past_buckets_states", # reformer
+ ]
+ # update past_key_values keeping its naming used in model code
+ for possible_cache_name in ALL_CACHE_NAMES:
+ if possible_cache_name in outputs:
+ # TODO (joao): remove output/input mismatch when these old models (xlnet, reformer) are deprecated
+ if possible_cache_name in ("past_buckets_states", "mems"):
+ cache_name = "past_key_values"
+ else:
+ cache_name = possible_cache_name
+ model_kwargs[cache_name] = getattr(outputs,
+ possible_cache_name)
+ break
+
+ # pdb.set_trace()
+
+ # update token_type_ids with last value
+ # false
+ if "token_type_ids" in model_kwargs:
+ token_type_ids = model_kwargs["token_type_ids"]
+ model_kwargs["token_type_ids"] = torch.cat(
+ [token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
+
+ if not is_encoder_decoder:
+ # update attention mask
+ # pay close attention to this part
+ # pdb.set_trace()
+ if "attention_mask" in model_kwargs:
+ attention_mask = model_kwargs["attention_mask"]
+ model_kwargs["attention_mask"] = torch.cat(
+ [
+ attention_mask,
+ attention_mask.new_ones(
+ (attention_mask.shape[0], num_new_tokens
+ )) # 1 -> num_new_tokens: extend the mask by several tokens at once
+ ],
+ dim=-1)
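+ # Illustrative note (editor's addition, assumed values): for an attention_mask of shape [1, 16] and
+ # num_new_tokens=4, the concatenation above appends four ones and yields a [1, 20] mask, covering the whole
+ # chunk produced in this step instead of a single token.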
+ else:
+ # update decoder attention mask
+ if "decoder_attention_mask" in model_kwargs:
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
+ model_kwargs["decoder_attention_mask"] = torch.cat(
+ [
+ decoder_attention_mask,
+ decoder_attention_mask.new_ones(
+ (decoder_attention_mask.shape[0], 1))
+ ],
+ dim=-1,
+ )
+
+ # pdb.set_trace()
+ if model_kwargs.get("use_cache", True):
+ # model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + num_new_tokens
+ model_kwargs["cache_position"] = torch.tensor([
+ model_kwargs["cache_position"][-1:].item() + 1
+ ]).to(model_kwargs["cache_position"].device)
+ else:
+ past_positions = model_kwargs.pop("cache_position")
+ new_positions = torch.arange(
+ past_positions[-1] + 1,
+ past_positions[-1] + num_new_tokens + 1,
+ dtype=past_positions.dtype).to(past_positions.device)
+ model_kwargs["cache_position"] = torch.cat(
+ (past_positions, new_positions))
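+ # Illustrative note (editor's addition): with use_cache=True the cache_position advances by exactly one
+ # slot per decoding step (e.g. [0, 1, 2] after prefill, then [3], [4], ...), which appears to match the
+ # one-entry-per-chunk indexing set up in the prefilling block rather than one entry per generated token.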
+ return model_kwargs
+
+ class AbsolutePositionalEncoding(nn.Module):
+ def __init__(self, hidden_size: int, max_len: int = 2048):
+ """
+ Initialize the absolute (sinusoidal) positional encoding.
+
+ Args:
+ hidden_size (int): hidden dimension
+ max_len (int): maximum sequence length
+ """
+ super().__init__()
+
+ # build the positional-encoding matrix
+ pe = torch.zeros(max_len, hidden_size)
+ position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+ div_term = torch.exp(torch.arange(0, hidden_size, 2).float() * (-math.log(10000.0) / hidden_size))
+
+ # sine on even dimensions, cosine on odd dimensions
+ pe[:, 0::2] = torch.sin(position * div_term)
+ pe[:, 1::2] = torch.cos(position * div_term)
+ pe = pe.unsqueeze(0) # [1, max_len, hidden_size]
+
+ # register as a buffer (not trained)
+ self.register_buffer('pe', pe)
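+ # Reference formula (editor's addition): this is the standard sinusoidal scheme,
+ # pe[pos, 2i] = sin(pos / 10000^(2i / hidden_size)) and pe[pos, 2i+1] = cos(pos / 10000^(2i / hidden_size)),
+ # so each dimension oscillates at its own frequency and nearby positions receive similar encodings.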
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Add the positional encoding to the input tensor.
+
+ Args:
+ x (torch.Tensor): input tensor of shape (batch_size, seq_len, hidden_size)
+
+ Returns:
+ torch.Tensor: the input with positional encodings added, same shape as the input
+ """
+ seq_len = x.size(1)
+
+ pos = x + self.pe[:, :seq_len]
+
+ # pdb.set_trace()
+ return pos
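+
+ # --- Hedged usage sketch (editor's addition, not part of the original model code) ---
+ # Assuming AbsolutePositionalEncoding is reachable from this scope, the call pattern is purely additive
+ # and shape-preserving:
+ #
+ #     ape = AbsolutePositionalEncoding(hidden_size=64, max_len=128)
+ #     x = torch.zeros(2, 16, 64)   # (batch_size, seq_len, hidden_size)
+ #     y = ape(x)                   # same shape [2, 16, 64]; rows 0..15 of pe have been added
+ #
+ # Because pe is a registered buffer, it moves with the module on .to(device) and is stored in the
+ # state_dict without being updated by the optimizer.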