ykzhang721 committed (verified)
Commit 7387b16 · 1 Parent(s): 7d7b7da

Upload modelforseminat_v5.py with huggingface_hub

Files changed (1):
  1. modelforseminat_v5.py +2156 -0
modelforseminat_v5.py ADDED (2,156 lines):
from transformers import Olmo2Model, Olmo2ForCausalLM, AutoTokenizer, logging
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
from transformers.modeling_outputs import (
    CausalLMOutputWithPast,
    BaseModelOutputWithPast,
)
import numpy as np
import math
from torch import nn
import pandas as pd
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from dataclasses import dataclass

# Olmo2
from transformers.models.olmo2.modeling_olmo2 import Olmo2RotaryEmbedding, Olmo2Attention, Olmo2MLP, Olmo2RMSNorm, apply_rotary_pos_emb, eager_attention_forward, Olmo2DecoderLayer
from transformers.models.olmo2.configuration_olmo2 import Olmo2Config
from transformers.processing_utils import Unpack
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.utils import LossKwargs
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS

from torch.nn.functional import cosine_similarity
import time
import os
import sys
import json
import pdb
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.data.distributed import DistributedSampler
import transformers
import pickle
from dataset import *
# from peft import (get_peft_model, PeftModel)
import random
from config import *
from datasets import Dataset, DatasetDict, load_dataset
import wandb
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import functools
from torch.optim.lr_scheduler import StepLR
import torch.nn.functional as F
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    checkpoint_wrapper, CheckpointImpl)
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    MixedPrecision,
    BackwardPrefetch,
    ShardingStrategy,
    FullStateDictConfig,
    StateDictType,
)
from torch.distributed.fsdp.wrap import (
    transformer_auto_wrap_policy,
    enable_wrap,
    wrap,
)
from functools import partial
from torch.utils.data import DataLoader
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union, Callable, Dict, Any

# `logger` is used below but was never defined; create it with the standard transformers pattern.
logger = logging.get_logger(__name__)

############ specially for generate() #################
import inspect
from transformers.generation.configuration_utils import (
    NEED_SETUP_CACHE_CLASSES_MAPPING,
    QUANT_BACKEND_CLASSES_MAPPING,
    GenerationConfig,
    GenerationMode,
)
from transformers.generation.logits_process import LogitsProcessorList
from transformers.generation.stopping_criteria import StoppingCriteriaList
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
from transformers.integrations.fsdp import is_fsdp_managed_module

from transformers.generation.utils import (
    is_torchdynamo_compiling, ModelOutput, GenerateDecoderOnlyOutput,
    GenerateEncoderDecoderOutput, GenerateBeamDecoderOnlyOutput,
    GenerateBeamEncoderDecoderOutput, GreedySearchDecoderOnlyOutput,
    ContrastiveSearchDecoderOnlyOutput, SampleDecoderOnlyOutput,
    ContrastiveSearchEncoderDecoderOutput, GreedySearchEncoderDecoderOutput,
    SampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput,
    BeamSampleDecoderOnlyOutput, BeamSearchEncoderDecoderOutput,
    BeamSampleEncoderDecoderOutput, GreedySearchOutput, SampleOutput,
    BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput,
    GenerateNonBeamOutput, GenerateBeamOutput, GenerateOutput)
from transformers.generation.stopping_criteria import (
    ConfidenceCriteria,
    EosTokenCriteria,
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteria,
    StoppingCriteriaList,
    StopStringCriteria,
)

from transformers.generation.stopping_criteria import STOPPING_CRITERIA_INPUTS_DOCSTRING
from transformers.pytorch_utils import isin_mps_friendly
from transformers.utils import add_start_docstrings

class EosTokenCriteriaForSemiNAT(StoppingCriteria):
    """
    This class can be used to stop generation whenever the "end-of-sequence" token is generated.
    By default, it uses the `model.generation_config.eos_token_id`.

    Args:
        eos_token_id (`Union[int, List[int], torch.Tensor]`):
            The id(s) of the *end-of-sequence* token.
    """

    def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor]):
        if not isinstance(eos_token_id, torch.Tensor):
            if isinstance(eos_token_id, int):
                eos_token_id = [eos_token_id]
            eos_token_id = torch.tensor(eos_token_id)
        self.eos_token_id = eos_token_id

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, last_k: int, **kwargs) -> torch.BoolTensor:
        # pdb.set_trace()
        # if torch.any(input_ids == 100257):
        #     pdb.set_trace()
        self.eos_token_id = self.eos_token_id.to(input_ids.device)
        token_is_eos = isin_mps_friendly(input_ids[:, -last_k:], self.eos_token_id)
        is_done = torch.any(token_is_eos, dim=1)
        return is_done

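
# Illustrative usage sketch (not from the uploaded file): because one semi-NAT step can append a
# whole chunk of tokens, the criterion above scans the last `last_k` positions for EOS instead of
# only the final token. The token ids below are made up for the example.
def _demo_eos_criterion_for_seminat():
    criterion = EosTokenCriteriaForSemiNAT(eos_token_id=2)
    input_ids = torch.tensor([
        [5, 6, 7, 2, 9],   # EOS inside the freshly generated chunk -> stop
        [5, 6, 7, 8, 9],   # no EOS -> keep generating
    ])
    scores = torch.zeros(2, 10)  # unused by this criterion
    return criterion(input_ids, scores, last_k=3)  # tensor([True, False])
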

############ specially for generate() #################


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@dataclass
class ModelOutputWithPastForSemiNAT(BaseModelOutputWithPast):

    chunk_hidden_state: torch.FloatTensor = None
    length_ground_truth: Optional[torch.FloatTensor] = None
    length_logits: Optional[torch.FloatTensor] = None
    position_embeddings: Optional[torch.FloatTensor] = None  # ?
    nar_hidden_state: torch.FloatTensor = None  # ?
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class TwoLayerMLP(nn.Module):

    def __init__(self, hidden_size: int, dropout_rate: float = 0.1):
        """
        Two-layer MLP that supports arbitrary leading (batch) dimensions.

        Args:
            hidden_size (int): hidden dimension
            dropout_rate (float): dropout rate, 0.1 by default
        """
        super().__init__()

        self.fc1 = nn.Linear(hidden_size, 4 * hidden_size)  # first layer expands the dimension 4x
        self.fc2 = nn.Linear(4 * hidden_size, hidden_size)  # second layer projects back
        self.dropout = nn.Dropout(p=dropout_rate)
        self.activation = nn.GELU()  # GELU activation

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass supporting arbitrary leading dimensions.

        Args:
            x (torch.Tensor): input of shape (..., hidden_size), with any leading dimensions

        Returns:
            torch.Tensor: output with the same shape as the input
        """
        # remember the original shape
        original_shape = x.shape
        hidden_size = original_shape[-1]

        # flatten to 2-D: (batch, hidden_size), where batch folds all leading dimensions
        x_2d = x.view(-1, hidden_size)

        # pdb.set_trace()
        # first layer: linear -> activation -> dropout
        x_2d = self.fc1(x_2d)
        x_2d = self.activation(x_2d)
        x_2d = self.dropout(x_2d)

        # second layer: linear
        x_2d = self.fc2(x_2d)
        # pdb.set_trace()
        # restore the original shape
        x = x_2d.view(*original_shape)
        # pdb.set_trace()
        return x

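
# Illustrative usage sketch (not from the uploaded file): TwoLayerMLP flattens all leading
# dimensions, applies Linear -> GELU -> Dropout -> Linear, and restores the original shape, so
# it can be applied to (batch, seq, hidden) or (batch, chunks, k, hidden) inputs alike.
def _demo_two_layer_mlp():
    mlp = TwoLayerMLP(hidden_size=8, dropout_rate=0.0)
    x = torch.randn(2, 5, 3, 8)    # arbitrary leading dims, hidden_size last
    y = mlp(x)
    assert y.shape == x.shape      # the shape is preserved
    return y
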

class Olmo2AttentionForSemiNAT(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None, is_causal: bool = True):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(
            config, "head_dim",
            config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = is_causal

        self.q_proj = nn.Linear(config.hidden_size,
                                config.num_attention_heads * self.head_dim,
                                bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size,
                                config.num_key_value_heads * self.head_dim,
                                bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size,
                                config.num_key_value_heads * self.head_dim,
                                bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim,
                                config.hidden_size,
                                bias=config.attention_bias)
        self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim,
                                   config.rms_norm_eps)
        self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim,
                                   config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
               Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        if position_embeddings is not None:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(
                query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "cache_position": cache_position
            }
            key_states, value_states = past_key_value.update(
                key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward

        # pdb.set_trace()

        # NOTE: the attention implementation is forced to SDPA here, overriding the configured value.
        self.config._attn_implementation = "sdpa"
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get(
                    "output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[
                    self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Olmo2DecoderLayerForSemiNAT(nn.Module):

    def __init__(
        self,
        config: Olmo2Config,
        layer_idx: int,
        is_causal: bool = True,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        # pdb.set_trace()
        self.self_attn = Olmo2AttentionForSemiNAT(config=config,
                                                  layer_idx=layer_idx,
                                                  is_causal=is_causal)
        self.mlp = Olmo2MLP(config)
        self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size,
                                                     eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size,
                                                       eps=config.rms_norm_eps)

        # pdb.set_trace()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:
        residual = hidden_states

        # pdb.set_trace()
        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, )
        if output_attentions:
            outputs += (self_attn_weights, )

        return outputs


class NATEncoderForSemiNAT(nn.Module):

    def __init__(self, config: Olmo2Config, num_layer: int = 1):
        super().__init__()
        self.num_layer = num_layer
        self.encoder_layers = nn.ModuleList([
            Olmo2DecoderLayerForSemiNAT(config, layer_idx)
            for layer_idx in range(self.num_layer)
        ])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:
        # pdb.set_trace()
        for layer in self.encoder_layers:
            outputs = layer(hidden_states=hidden_states,
                            output_attentions=output_attentions,
                            position_embeddings=position_embeddings,
                            attention_mask=attention_mask)
            hidden_states = outputs[0]
        # pdb.set_trace()
        # only the last layer attn_weights and present_key_value are stored
        # mean pool the hidden states across sequence (chunk)
        # hidden_states = torch.mean(hidden_states, dim=1)
        return hidden_states


class NATDecoderForSemiNAT(nn.Module):

    def __init__(self, config: Olmo2Config, num_layer: int = 1):
        super().__init__()
        self.num_layer = num_layer
        self.decoder_layers = nn.ModuleList([
            Olmo2DecoderLayerForSemiNAT(config, layer_idx, False)
            for layer_idx in range(self.num_layer)
        ])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:

        for layer in self.decoder_layers:
            # pdb.set_trace()
            outputs = layer(hidden_states=hidden_states,
                            attention_mask=attention_mask,
                            output_attentions=output_attentions,
                            position_embeddings=position_embeddings)
            hidden_states = outputs[0]
        return hidden_states


class Olmo2ModelForSemiNAT(Olmo2Model):

    def __init__(self, config):
        super().__init__(config)
        self.layers = nn.ModuleList([
            Olmo2DecoderLayer(config, layer_idx)
            for layer_idx in range(config.num_hidden_layers)
        ])

        self.decoder = NATDecoderForSemiNAT(config, 1)
        self.encoder = NATEncoderForSemiNAT(config, 1)

        # pdb.set_trace()
        self.chunk_size_limit = config.chunk_size_limit
        self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Olmo2RotaryEmbedding(config=config)
        self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
        self.gradient_checkpointing = False
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
                                         self.padding_idx)

        self.length_predictor = nn.Linear(config.hidden_size,
                                          self.chunk_size_limit)

        # self.linear_projection = TwoLayerMLP(config.hidden_size)

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        slice_pos: torch.Tensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        inference: Optional[bool] = None,
        padding: Optional[torch.Tensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (output_hidden_states
                                if output_hidden_states is not None else
                                self.config.output_hidden_states)
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length(
            ) if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens,
                                          past_seen_tokens +
                                          inputs_embeds.shape[1],
                                          device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        if inference is not None:
            position_ids = cache_position.unsqueeze(0)

        position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None
        max_chunk_num = (slice_pos != -1).sum(dim=1).max()

        # pdb.set_trace()

        ################################ parallel (vectorized) chunk processing ################################
        M_avg, attn_mask, length_ground_truth, chunk_attention_mask, slice_num = self.build_slice_matrix(slice_pos)  # torch.Size([1, 111, 512])

        # pdb.set_trace()
        encoded_input = self.encoder(inputs_embeds, position_embeddings=position_embeddings, attention_mask=attn_mask)  # torch.Size([1, 512, 2048])
        # the attention_mask has no effect here, since the encoder has no attention layer

        M_avg = M_avg.contiguous()
        encoded_input = encoded_input.contiguous()
        M_avg = M_avg.to(torch.bfloat16)
        encoded_input = encoded_input.to(torch.bfloat16)

        chunk_inputs_embeds = torch.matmul(M_avg, encoded_input)
        accumu_num = sum(slice_num) - encoded_input.shape[0]
        # pdb.set_trace()
        ################################ parallel (vectorized) chunk processing ################################

        ################################ serial (per-chunk) processing, kept for reference ################################
        # initialize chunk inputs as embedding of [pad]
        # pad_token_id = padding
        # batch_size, seq_len, hidden_size = inputs_embeds.shape
        # pad_embedding = self.embed_tokens(
        #     torch.tensor([pad_token_id]).to(inputs_embeds.device))  # 1, 2048

        # # pdb.set_trace()
        # chunk_inputs_embeds = pad_embedding.expand(
        #     batch_size, max_chunk_num, hidden_size).clone().to(
        #         inputs_embeds.device)

        # length_ground_truth = []
        # chunk_attention_mask = []
        # chunk_labels = []
        # # max_chunk_num = 0
        # accumu_num = 0
        # slice_nums = []

        # for b in range(batch_size):
        #     slice_num = 0
        #     start_position = 0
        #     slice_length = []
        #     for i in range(seq_len):
        #         cut = slice_pos[b, i].item()  # the cut position
        #         if cut == -1:  # -1 means no cut here
        #             pass
        #         else:
        #             cut += 1  # +1: the cut is made after this token
        #             chunk_inputs_embeds[b, i] = self.encoder(
        #                 inputs_embeds[b, start_position:cut].unsqueeze(0),
        #                 position_embeddings=tuple(
        #                     tensor[0, 0:cut - start_position, :].unsqueeze(0)
        #                     for tensor in position_embeddings))
        #             slice_num += 1
        #             slice_length.append(cut - start_position)
        #             if cut - start_position > 10 or cut - start_position < 0:
        #                 pdb.set_trace()
        #             start_position = cut  # start of the next chunk
        #     slice_nums.append(slice_num)  # number of chunks per sample
        #     # max_chunk_num = max(max_chunk_num, slice_num)  # unused; the accumulated chunk count is used instead
        #     accumu_num += slice_num
        #     chunk_attention_mask.append(
        #         torch.tensor([1] * slice_num + [0] *
        #                      (seq_len - slice_num)).unsqueeze(0))  # 1 = cut, 0 = no cut
        #     length_ground_truth.append(
        #         torch.tensor(slice_length + [-100] *
        #                      (seq_len - slice_num)).unsqueeze(0))  # -100 = no cut
        # accumu_num -= batch_size

        # # pdb.set_trace()

        # chunk_attention_mask = torch.cat(chunk_attention_mask, dim=0).to(
        #     inputs_embeds.device)  # torch.Size([1, 256])  bs * length

        # length_ground_truth = torch.cat(length_ground_truth,
        #                                 dim=0).to(inputs_embeds.device)
        ################################ serial (per-chunk) processing, kept for reference ################################

        # crop to the maximum chunk count to speed up computation
        chunk_inputs_embeds = chunk_inputs_embeds[:, :max_chunk_num, :]
        chunk_attention_mask = chunk_attention_mask[:, :max_chunk_num]
        length_ground_truth = length_ground_truth[:, :max_chunk_num]
        chunk_position_ids = position_ids[:, :max_chunk_num]
        chunk_cache_position = cache_position[:max_chunk_num]

        chunk_position_embeddings = self.rotary_emb(
            chunk_inputs_embeds, chunk_position_ids
        )  # tuple; the first element is torch.Size([1, 256, 128]); the last dim is hidden_size / num_heads, 64 dims each for cos and sin

        hidden_states = chunk_inputs_embeds  # bs * max_chunk_num * hidden_size

        # pdb.set_trace()

        # the inference path still needs to be checked
        if inference is not None:

            # at inference time, drop the padded chunk positions
            mask_bool = chunk_attention_mask.bool()
            chunk_inputs_embeds = chunk_inputs_embeds[mask_bool.unsqueeze(
                -1).expand_as(chunk_inputs_embeds)].view(
                    chunk_inputs_embeds.size(0), -1,
                    chunk_inputs_embeds.size(2))
            chunk_attention_mask = chunk_attention_mask[mask_bool].view(
                chunk_attention_mask.size(0), -1)

            # pdb.set_trace()
            chunk_inputs_embeds = chunk_inputs_embeds[:, chunk_cache_position, :]
            chunk_attention_mask = chunk_attention_mask[:, chunk_cache_position]

            hidden_states = chunk_inputs_embeds

        # pdb.set_trace()

        causal_mask = self._update_causal_mask(chunk_attention_mask,
                                               chunk_inputs_embeds,
                                               chunk_cache_position,
                                               past_key_values,
                                               output_attentions)

        # pdb.set_trace()
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states, )
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    chunk_position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=chunk_position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1], )

        # pdb.set_trace()
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states, )

        hidden_states = self.norm(
            hidden_states)  # bs * max_chunk_num * hidden_size, the hidden state of every chunk

        # pdb.set_trace()

        # length-prediction head
        self.length_predictor = self.length_predictor.to(
            hidden_states.device).to(hidden_states.dtype)  # forced to bf16 here, because that is the training dtype
        length_logits = self.length_predictor(
            hidden_states.to(
                hidden_states.device))  # bs * length * chunk_size_limit

        # pdb.set_trace()

        next_cache = next_decoder_cache if use_cache else None  # DynamicCache()
        # if return_legacy_cache:
        #     next_cache = next_cache.to_legacy_cache()

        # pdb.set_trace()

        nar_hidden_states = None
        if inference is None:
            # NAR decoder
            bs, length, hidden_size = hidden_states.size()
            assert length == max_chunk_num

            # shape: (bs * max_chunk_num) * chunk_size_limit * hidden_size
            # try:
            #     nat_input_embeddings = torch.zeros(
            #         accumu_num, self.chunk_size_limit,
            #         hidden_size).to(hidden_states.device).to(hidden_states.dtype)
            # except:
            #     pdb.set_trace()
            # nat_attention_mask = torch.zeros(
            #     accumu_num, self.chunk_size_limit).to(hidden_states.device).to(
            #         hidden_states.dtype)
            # tot_chunk_num = 0

            nat_input_embeddings, nat_attention_mask = self.repeat_with_limit_and_pad(
                hidden_states, length_ground_truth, self.chunk_size_limit, skip_val=-100)

            # pdb.set_trace()
            # for b in range(bs):
            #     for i in range(slice_num[b]):
            #         # slice_nums[b] is the number of chunks in sample b
            #         # length_ground_truth[b] holds the true chunk lengths of sample b
            #         # copy hidden_states length_ground_truth times into nat_input_embeddings

            #         if length_ground_truth[b, i + 1] != -100:
            #             # pdb.set_trace()
            #             nat_input_embeddings[
            #                 tot_chunk_num, :length_ground_truth[
            #                     b, i + 1], :] = hidden_states[b, i:i + 1, :].expand(
            #                         length_ground_truth[b, i + 1], hidden_size)
            #             nat_attention_mask[tot_chunk_num, :length_ground_truth[
            #                 b, i + 1]] = torch.tensor(
            #                     [1] * length_ground_truth[b, i + 1])
            #             tot_chunk_num += 1
            #             # pdb.set_trace()
            #         else:
            #             break

            # pdb.set_trace()
            # nar_chunk_position = torch.arange(
            #     0, self.chunk_size_limit).unsqueeze(0).repeat(
            #         accumu_num,
            #         1).to(hidden_states.device)  # bs * max_chunk_num

            # nar_position_embeddings = self.rotary_emb(nat_attention_mask,
            #                                           nar_chunk_position)

            # pdb.set_trace()

            nat_input_embeddings = self.pos_encoder(nat_input_embeddings)  # add absolute positional encoding

            self.decoder = self.decoder.to(dtype=nat_input_embeddings.dtype)

            # build the NAR attention mask
            mask_nat_attention_mask = self.nat_prepare_4d_full_attention_mask_without_causal(
                attention_mask=nat_attention_mask,
                dtype=nat_attention_mask.dtype,
                device=nat_attention_mask.device)

            # pdb.set_trace()
            nar_hidden_states = self.decoder(
                nat_input_embeddings,
                attention_mask=mask_nat_attention_mask,
                # attention_mask=None,
                # position_embeddings=nar_position_embeddings,
                position_embeddings=None,  # absolute positions are used; no rotary (relative) positions are passed
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=None,
            )

            nar_hidden_states = self.norm(
                nar_hidden_states)  # bs * max_chunk_num * hidden_size

        return ModelOutputWithPastForSemiNAT(
            chunk_hidden_state=hidden_states,
            length_ground_truth=length_ground_truth,
            length_logits=length_logits,
            position_embeddings=position_embeddings,
            nar_hidden_state=nar_hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
    # @staticmethod
    # def nat_prepare_4d_full_attention_mask_without_causal(
    #     self,
    #     attention_mask: torch.Tensor,
    #     dtype: torch.dtype,
    #     device: torch.device,
    # ) -> torch.Tensor:
    #     """
    #     Build a non-causal full attention mask that only hides padding tokens.

    #     Args:
    #         attention_mask (torch.Tensor): (batch_size, seq_len); 1 = valid token, 0 = padding.
    #         dtype (torch.dtype): dtype of the produced mask (usually torch.float32/bfloat16).
    #         device (torch.device): device the mask lives on.

    #     Returns:
    #         torch.Tensor: shape = (batch_size, 1, seq_len, seq_len); non-padding tokens all see each other, padding is masked.
    #     """
    #     if attention_mask.dim() != 2:
    #         raise ValueError("Expected 2D attention_mask of shape (batch_size, seq_len)")

    #     batch_size, seq_len = attention_mask.shape
    #     attention_mask = attention_mask.to(dtype=torch.float32)  # force float32 before the broadcasting logic
    #     attention_mask = attention_mask.to(device)

    #     # outer product: only keep positions where both query and key are valid (1 * 1 = 1)
    #     visible_mask = attention_mask[:, None, :, None] * attention_mask[:, None, None, :]  # (bs, 1, seq_len, seq_len)

    #     # convert to an additive mask: visible -> 0.0, hidden -> -inf
    #     min_dtype = torch.finfo(dtype).min
    #     full_attention_mask = (1.0 - visible_mask) * min_dtype  # valid region is 0.0, everything else is -inf

    #     return full_attention_mask.to(dtype=dtype)

    # def nat_prepare_4d_full_attention_mask_no_masking(
    #     self,
    #     attention_mask: torch.Tensor,  # (bs, L), unused here
    #     dtype: torch.dtype,  # torch.float32/bfloat16
    #     device: torch.device,
    #     mask_val: float = -1e4,  # unused
    # ) -> torch.Tensor:
    #     """
    #     Build a fully visible attention mask, padding tokens included.
    #     - every query can attend to every key;
    #     - the additive mask is all zeros (nothing is hidden);
    #     Returns shape = (bs, 1, L, L).
    #     """
    #     if attention_mask.dim() != 2:
    #         raise ValueError(
    #             "Expected 2-D attention_mask with shape (batch, seq_len)")

    #     bs, L = attention_mask.shape
    #     additive_mask = torch.zeros((bs, 1, L, L), dtype=dtype,
    #                                 device=device)  # all zeros = everything visible

    #     return additive_mask


    def repeat_with_limit_and_pad(self, x: torch.Tensor, repeat_counts: torch.Tensor, chunk_limit: int, skip_val: int = -100):
        """
        Repeat every position of x a given number of times (at most chunk_limit), pad the
        remainder, and skip positions whose repeat count is -100.

        Args:
            - x: Tensor of shape (bs, length, hidden)
            - repeat_counts: Tensor of shape (bs, length), the repeat count per position; -100 means skip
            - chunk_limit: int, the maximum number of copies per position; shorter chunks are padded
            - skip_val: int, the skip marker, -100 by default

        Returns:
            - Tensor of shape (chunk_num, chunk_limit, hidden)
        """
        bs, length, hidden = x.shape
        device = x.device

        x = x[:, :-1, :]
        repeat_counts = repeat_counts[:, 1:]

        # Step 1: flatten & keep the valid positions
        x_flat = x.reshape(-1, hidden)  # (bs * length, hidden)
        repeat_flat = repeat_counts.reshape(-1)  # (bs * length,)

        valid_mask = repeat_flat != skip_val
        x_valid = x_flat[valid_mask]  # (chunk_num, hidden)
        repeat_valid = repeat_flat[valid_mask].clamp_max(chunk_limit)  # (chunk_num,)

        # Step 2: expand the vectors
        # chunk_num = x_valid.size(0)
        repeated = x_valid.unsqueeze(1).expand(-1, chunk_limit, -1)  # (chunk_num, chunk_limit, hidden)

        # Step 3: build the mask used for padding
        range_k = torch.arange(chunk_limit, device=device).unsqueeze(0)  # (1, chunk_limit)
        mask = (range_k < repeat_valid.unsqueeze(1)).unsqueeze(-1)  # (chunk_num, chunk_limit, 1)

        # Step 4: apply the mask; padded positions become 0
        out = repeated * mask  # masked positions are filled with 0

        mask = mask.squeeze(-1).to(x.dtype)
        # pdb.set_trace()
        return out, mask  # shape: (chunk_num, chunk_limit, hidden)

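    # Illustrative worked example (not from the uploaded file): each chunk representation is
    # copied once per target token (up to chunk_limit) and right-padded; this is how the NAR
    # decoder input is materialised from the chunk-level hidden states. The tensors below are
    # made up for the example.
    def _demo_repeat_with_limit_and_pad(self):
        x = torch.randn(1, 3, 4)                      # (bs, length, hidden)
        repeat_counts = torch.tensor([[-100, 2, 3]])  # the first entry is shifted away
        out, mask = self.repeat_with_limit_and_pad(x, repeat_counts, chunk_limit=5)
        # out.shape == (2, 5, 4); mask[0].sum() == 2; mask[1].sum() == 3
        return out, mask
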

    # def build_slice_matrix(self, slice_pos: torch.Tensor) -> torch.Tensor:
    #     bs, num_slices = slice_pos.shape
    #     seq_len = num_slices

    #     # replace -1 with 0 for the prev computation
    #     slice_pos_clipped = slice_pos.clone()
    #     slice_pos_clipped[slice_pos_clipped == -1] = 0

    #     # prevs (a) and currents (b)
    #     prevs = torch.cat([
    #         torch.zeros((bs, 1), device=slice_pos.device, dtype=slice_pos.dtype),
    #         slice_pos_clipped[:, :-1] + 1
    #     ], dim=1)
    #     currents = slice_pos_clipped + 1

    #     # valid mask
    #     valid_mask = (slice_pos != -1)
    #     lengths = currents - prevs  # (bs, num_slices)
    #     lengths[lengths <= 0] = -100  # replace non-positive entries with -100

    #     # number of non -100 entries per row
    #     slice_num = (lengths != -100).sum(dim=1).tolist()

    #     # build chunk_mask
    #     chunk_mask = torch.zeros_like(lengths, dtype=torch.long)
    #     for i in range(lengths.size(0)):
    #         chunk_mask[i, :slice_num[i]] = 1  # the first slice_num[i] entries are set to 1
    #     values = torch.zeros_like(lengths, dtype=torch.float)
    #     values[valid_mask] = 1.0 / lengths[valid_mask]

    #     chunk_nums = valid_mask.sum(dim=1)  # (bs,)
    #     max_chunk_num = chunk_nums.max().item()

    #     # initialize the output
    #     M = torch.zeros((bs, max_chunk_num, seq_len), device=slice_pos.device)

    #     # fill per batch element
    #     for b in range(bs):
    #         a_b = prevs[b]  # (num_slices,)
    #         b_b = currents[b]  # (num_slices,)
    #         v_b = values[b]  # (num_slices,)

    #         for i in range(num_slices):
    #             if not valid_mask[b, i]:
    #                 continue
    #             a = a_b[i].item()
    #             b_ = b_b[i].item()
    #             if b_ > a:
    #                 M[b, i, a:b_] = v_b[i]

    #     return M, lengths, chunk_mask, slice_num

    def build_slice_matrix(self, slice_pos: torch.Tensor):
        bs, num_slices = slice_pos.shape
        seq_len = num_slices

        # replace -1 with 0 for the prev computation
        slice_pos_clipped = slice_pos.clone()
        slice_pos_clipped[slice_pos_clipped == -1] = 0

        # prevs (a) and currents (b)
        prevs = torch.cat([
            torch.zeros((bs, 1), device=slice_pos.device, dtype=slice_pos.dtype),
            slice_pos_clipped[:, :-1] + 1
        ], dim=1)
        currents = slice_pos_clipped + 1

        # valid mask
        valid_mask = (slice_pos != -1)
        lengths = currents - prevs  # (bs, num_slices)
        lengths[lengths <= 0] = -100  # invalid values

        # number of non -100 entries per row
        slice_num = (lengths != -100).sum(dim=1).tolist()

        # chunk mask
        chunk_mask = torch.zeros_like(lengths, dtype=torch.long)
        for i in range(lengths.size(0)):
            chunk_mask[i, :slice_num[i]] = 1
        values = torch.zeros_like(lengths, dtype=torch.float)
        values[valid_mask] = 1.0 / lengths[valid_mask]

        chunk_nums = valid_mask.sum(dim=1)
        max_chunk_num = chunk_nums.max().item()

        # initialize the output matrix M
        M = torch.zeros((bs, max_chunk_num, seq_len), device=slice_pos.device)

        # initialize the attention mask (bs, 1, seq_len, seq_len); everything is masked (True) by default
        attn_mask = torch.ones((bs, 1, seq_len, seq_len), dtype=torch.bool, device=slice_pos.device)

        # fill M and the attention mask
        for b in range(bs):
            a_b = prevs[b]
            b_b = currents[b]
            v_b = values[b]

            for i in range(num_slices):
                if not valid_mask[b, i]:
                    continue
                a = a_b[i].item()
                b_ = b_b[i].item()
                if b_ > a:
                    # fill the chunk-averaging matrix
                    M[b, i, a:b_] = v_b[i]
                    # update the attention mask: positions inside the same chunk are not masked (False)
                    attn_mask[b, :, a:b_, a:b_] = False
        # pdb.set_trace()
        return M, attn_mask, lengths, chunk_mask, slice_num

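    # Illustrative worked example (not from the uploaded file): build_slice_matrix turns cut
    # positions into a chunk-averaging matrix M and a block-diagonal attention mask. With
    # slice_pos = [1, 4, -1, ...] the chunks are tokens 0-1 and tokens 2-4, so M[0, 0, 0:2] = 1/2,
    # M[0, 1, 2:5] = 1/3, and each chunk can only attend to itself.
    def _demo_build_slice_matrix(self):
        slice_pos = torch.tensor([[1, 4, -1, -1, -1, -1]])
        M, attn_mask, lengths, chunk_mask, slice_num = self.build_slice_matrix(slice_pos)
        # M.shape == (1, 2, 6); lengths[0, :2] == tensor([2, 3]); slice_num == [2]
        return M, attn_mask, lengths, chunk_mask, slice_num
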
    def nat_prepare_4d_full_attention_mask_without_causal(
        self,
        attention_mask: torch.Tensor,  # (bs, L) 1=real, 0=pad
        dtype: torch.dtype,  # torch.float32/bfloat16
        device: torch.device,
        mask_val: float = -1e4,  # value used to hide positions in the additive mask
    ) -> torch.Tensor:
        """
        - For rows whose query is a valid token (attention_mask == 1):
          only keys that are also valid tokens are visible -> full mutual visibility.
        - For rows whose query is padding:
          use a causal lower triangle (j <= i) -> avoids a fully masked row.
        Returns an additive mask of shape (bs, 1, L, L).
        """
        if attention_mask.dim() != 2:
            raise ValueError(
                "Expected 2-D attention_mask with shape (batch, seq_len)"
            )

        bs, L = attention_mask.shape
        attn_mask_f = attention_mask.to(device=device, dtype=torch.float32)  # easier broadcasting

        # ---------- (1) mutual visibility among valid tokens ----------
        # valid2valid[b, i, j] = 1 ⇔ query_i and key_j are both real tokens
        valid2valid = attn_mask_f[:, :, None] * attn_mask_f[:, None, :]  # (bs, L, L)

        # ---------- (2) causal lower triangle for padding rows ----------
        # lower_tri[i, j] = 1 ⇔ j ≤ i
        lower_tri = torch.tril(torch.ones(L, L, device=device))
        # query_is_pad: (bs, L, 1) 1=pad
        query_is_pad = (1.0 - attn_mask_f)[:, :, None]
        causal_part = query_is_pad * lower_tri  # (bs, L, L)

        # ---------- (3) combine the two parts ----------
        visible = torch.clamp(valid2valid + causal_part, 0.0, 1.0)  # (bs, L, L)

        # ---------- (4) convert to an additive mask ----------
        additive_mask = (1.0 - visible) * mask_val  # 0 -> 0, 1 -> mask_val
        additive_mask = additive_mask[:, None, :, :]  # (bs, 1, L, L)

        return additive_mask.to(dtype=dtype)

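    # Illustrative worked example (not from the uploaded file): real tokens get full mutual
    # visibility (0.0), padding keys are hidden behind mask_val, and padding queries fall back
    # to a causal lower triangle so that no row of the additive mask is entirely masked.
    def _demo_nat_full_attention_mask(self):
        attention_mask = torch.tensor([[1.0, 1.0, 0.0]])  # two real tokens, one pad
        additive = self.nat_prepare_4d_full_attention_mask_without_causal(
            attention_mask, dtype=torch.float32, device=attention_mask.device)
        # additive.shape == (1, 1, 3, 3); the first row equals tensor([0., 0., -10000.])
        return additive
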
    # NOTE: defined without `self`; effectively a static helper.
    def compute_chunk_lengths(slice_pos: torch.Tensor, pad_value: int = -100):
        """
        Args:
            slice_pos: [B, L] cut positions; a cut is made after the token at this position, -1 means padding
        Returns:
            length_gt: [B, max_chunk_num], the length of every chunk, padded with pad_value
        """
        B, L = slice_pos.shape
        device = slice_pos.device

        length_ground_truth = []

        for b in range(B):
            pos = slice_pos[b]
            pos = pos[pos != -1] + 1  # keep the valid cut positions and add 1 (the cut is after the token)
            cuts = torch.cat([
                torch.tensor([0], device=device),  # start point
                pos,
            ])
            lens = cuts[1:] - cuts[:-1]  # length of every segment

            # pad up to max_chunk_num (L)
            padded = torch.full((L,), pad_value, device=device, dtype=torch.long)
            padded[:lens.shape[0]] = lens
            length_ground_truth.append(padded)

        return torch.stack(length_ground_truth)  # [B, L]


class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM):

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.chunk_size_limit = config.chunk_size_limit
        self.model = Olmo2ModelForSemiNAT(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size,
                                 config.vocab_size,
                                 bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        slice_pos: Optional[torch.Tensor] = None,
        slice_label: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        # padding: Optional[torch.Tensor] = None,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (output_hidden_states
                                if output_hidden_states is not None else
                                self.config.output_hidden_states)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # pdb.set_trace()

        # start = time.time()

        if labels is not None:
            outputs = self.model(
                input_ids=input_ids,  # bs * length
                attention_mask=attention_mask,  # bs * length
                position_ids=position_ids,
                slice_pos=slice_pos,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                cache_position=cache_position,
                padding=self.padding_idx,
                **kwargs,
            )
        else:
            outputs = self.model(
                input_ids=input_ids,  # bs * length
                attention_mask=attention_mask,  # bs * length
                position_ids=position_ids,
                slice_pos=slice_pos,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                cache_position=cache_position,
                padding=self.padding_idx,
                inference=True,
            )

        # end1 = time.time()
        # print(f"end1 time: {end1 - start:.4f} s")

        # pdb.set_trace()

        chunk_hidden_states = outputs.chunk_hidden_state
        bs, length, hidden_size = chunk_hidden_states.size()

        ############################# loss computation: two parts #############################
        loss = None
        loss1 = None
        loss2 = None
        ############################# Part 1: an MLP head predicts each chunk's length (output dim = chunk_size_limit) #############################

        if labels is not None:

            length_ground_truth = outputs.length_ground_truth
            length_logits = outputs.length_logits

            new_length_ground_truth = torch.where(
                length_ground_truth != -100,  # condition: not equal to -100
                length_ground_truth - 1,      # if true, use length - 1 (0-indexed class)
                length_ground_truth           # otherwise keep the original value
            )

            # pdb.set_trace()

            shift_length_logits = length_logits[:, :-1, :]
            shift_new_length_ground_truth = new_length_ground_truth[:, 1:]

            logits_flat = shift_length_logits.reshape(
                -1,
                self.chunk_size_limit)  # shape becomes [bs * length, chunk_size_limit]
            labels_flat = shift_new_length_ground_truth.reshape(
                -1)  # [bs * length]

            # softmax logits to get probability
            logits_flat = torch.nn.functional.softmax(logits_flat, dim=-1)

            # use an MSE loss: take the probability-weighted expected length (not the argmax), then compare it with the label

            # pdb.set_trace()
            # expected predicted length
            predicted_lengths = torch.sum(
                logits_flat * torch.arange(self.chunk_size_limit).to(
                    chunk_hidden_states.device).to(chunk_hidden_states.dtype),
                dim=1)
            # mean squared error between the predicted and true lengths

            shift_slice_label = slice_label[:, 1:length_logits.size(1)]  # truncate with the maximum chunk count
            slice_label_flat = shift_slice_label.reshape(-1)

            # global indices aligned with labels_flat
            indices = torch.arange(0, labels_flat.size(0), device=labels_flat.device)
            mask = (slice_label_flat == -1)

            # pdb.set_trace()
            # labels_not_ignored = (labels_flat[indices] != -100)
            # final_mask = mask & labels_not_ignored
            labels_flat[indices[mask]] = -100

            loss1 = torch.mean((predicted_lengths[labels_flat != -100] -
                                labels_flat[labels_flat != -100].float())**2)

            # pdb.set_trace()

            nar_hidden_state = outputs.nar_hidden_state

            ############################# Part 2: recover every token from its chunk hidden state and compute the loss against the ground truth #############################

            nar_labels = torch.full(
                (nar_hidden_state.size(0), nar_hidden_state.size(1)),
                -100).to(nar_hidden_state.device)  # bs * length

            nar_labels = self.update_nar_labels(nar_labels, labels, slice_pos,
                                                length_ground_truth, input_ids,
                                                self.chunk_size_limit)

            # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
            slice_indices = slice(-logits_to_keep, None) if isinstance(
                logits_to_keep, int) else logits_to_keep
            logits = self.lm_head(
                nar_hidden_state[:, slice_indices, :])  # 1 * seq_len * 50304
            # logits = logits.float()
            # pdb.set_trace()
            # if labels is not None:

            loss2 = self.loss_function_seminat(
                logits,
                nar_labels,
                self.vocab_size,
            )

            # grad1 = torch.autograd.grad(loss1, outputs.chunk_hidden_state, retain_graph=True)[0]
            # grad2 = torch.autograd.grad(loss2, outputs.chunk_hidden_state, retain_graph=True)[0]
            # cos_sim = cosine_similarity(grad1.flatten(), grad2.flatten(), dim=0)

            # pdb.set_trace()

        else:  # for inference
            softmaxed = torch.softmax(outputs.length_logits[:, -1, :], dim=-1)
            length = torch.argmax(softmaxed, dim=-1).item() + 1
            # pdb.set_trace()

            # nat_input_embeddings = torch.zeros(
            #     1, self.chunk_size_limit, hidden_size).to(input_ids.device).to(
            #         outputs.chunk_hidden_state.dtype)

            nat_input_embeddings = torch.zeros(
                1, length, hidden_size).to(input_ids.device).to(
                    outputs.chunk_hidden_state.dtype)
            nat_attention_mask = torch.zeros(1, self.chunk_size_limit).to(
                input_ids.device).to(outputs.chunk_hidden_state.dtype)

            # pdb.set_trace()

            nat_input_embeddings[:, :length, :] = outputs.chunk_hidden_state[:, -1, :].expand(
                length, -1).to(input_ids.device).to(
                    outputs.chunk_hidden_state.dtype)

            nat_attention_mask[:, :length] = torch.tensor([1] * length).to(
                input_ids.device).to(outputs.chunk_hidden_state.dtype)

            nar_chunk_position = torch.arange(
                0, self.chunk_size_limit).unsqueeze(0).to(input_ids.device).to(
                    outputs.chunk_hidden_state.dtype)  # bs * max_chunk_num

            # nar_position_embeddings = self.pos_encoder(nat_attention_mask,
            #                                            nar_chunk_position)

            # pdb.set_trace()
            nat_input_embeddings = self.pos_encoder(nat_input_embeddings)  # add absolute positional encoding

            # pdb.set_trace()
            nar_hidden_states = self.model.decoder(
                nat_input_embeddings,
                # attention_mask=nat_attention_mask,
                attention_mask=None,
                # position_embeddings=nar_position_embeddings,
                position_embeddings=None,
                output_attentions=output_attentions,
                use_cache=False,
                cache_position=None,
            )

            nar_hidden_states = self.model.norm(nar_hidden_states)
            # slice_indices = slice(-logits_to_keep, None) if isinstance(
            #     logits_to_keep, int) else logits_to_keep
            logits = self.lm_head(
                nar_hidden_states[:, :, :])

            # end2 = time.time()
            # print(f"end2 time: {end2 - end1:.4f} s")

            # pdb.set_trace()
            return CausalLMOutputWithPast(
                loss=(loss1, loss2),
                logits=logits,
                past_key_values=outputs.past_key_values,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )

        ############################# end of the two-part loss computation #############################

        # if not return_dict:
        #     output = (logits, ) + outputs[1:]
        #     if output_router_logits:
        #         output = (aux_loss, ) + output
        #     return (loss, ) + output if loss is not None else output
        # pdb.set_trace()
        return CausalLMOutputWithPast(
            loss=(loss1, loss2),
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def update_nar_labels(self, nar_labels, labels, slice_pos,
                          length_ground_truth, input_ids, chunk_size_limit):
        bs, length = input_ids.size()
        chunk = 0
        for b in range(bs):
            last_cut = slice_pos[b][0]  # position of the first cut
            for i in range(1, length):
                if slice_pos[b, i] != -1:
                    # pdb.set_trace()
                    try:
                        nar_labels[chunk, :length_ground_truth[b, i]] = labels[
                            b, last_cut + 1:slice_pos[b, i] + 1]
                    except:
                        pdb.set_trace()
                    last_cut = slice_pos[b, i]
                    chunk += 1
                else:
                    break
        # pdb.set_trace()
        return nar_labels

    def fixed_cross_entropy(self,
                            source,
                            target,
                            num_items_in_batch: int = None,
                            ignore_index: int = -100,
                            **kwargs):
        reduction = "sum" if num_items_in_batch is not None else "mean"
        loss = F.cross_entropy(source,
                               target,
                               ignore_index=ignore_index,
                               reduction=reduction)
        if torch.isnan(loss):
            # print(f"Step {global_step}: loss is NaN, entering pdb …")
            pdb.set_trace()
        # pdb.set_trace()
        if reduction == "sum":
            loss = loss / num_items_in_batch
        return loss

    def loss_function_seminat(self,
                              logits,
                              labels,
                              vocab_size: int,
                              num_items_in_batch: int = None,
                              ignore_index: int = -100,
                              **kwargs):
        # logits: (B, L, V)
        # labels: (B, L)

        logits = logits.float()
        labels = labels.to(logits.device)

        # Flatten the tokens (no shift)
        logits = logits.view(-1, vocab_size)  # (B*L, V)
        labels = labels.view(-1)  # (B*L)

        # Ensure device alignment
        labels = labels.to(logits.device)

        # Compute loss
        loss = self.fixed_cross_entropy(logits, labels, num_items_in_batch,
                                        ignore_index, **kwargs)
        return loss
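
    # Illustrative sketch (not from the uploaded file): forward() returns the pair
    # (length-prediction MSE, NAR token cross-entropy) rather than a single scalar, so the
    # training loop is expected to combine them itself; the equal weighting here is an assumption.
    def _demo_combine_losses(self, output: CausalLMOutputWithPast, length_loss_weight: float = 1.0):
        loss1, loss2 = output.loss                     # MSE over chunk lengths, CE over tokens
        return length_loss_weight * loss1 + loss2      # scalar to call backward() on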
1503
+
1504
+ def generate(
1505
+ self,
1506
+ inputs: Optional[torch.Tensor] = None,
1507
+ generation_config: Optional[GenerationConfig] = None,
1508
+ logits_processor: Optional[LogitsProcessorList] = None,
1509
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1510
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor],
1511
+ List[int]]] = None,
1512
+ synced_gpus: Optional[bool] = None,
1513
+ assistant_model: Optional["PreTrainedModel"] = None,
1514
+ streamer: Optional["BaseStreamer"] = None,
1515
+ negative_prompt_ids: Optional[torch.Tensor] = None,
1516
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
1517
+ prefilling_length: int = 0,
1518
+ **kwargs,
1519
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1520
+
1521
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
1522
+ self._validate_model_class()
1523
+ tokenizer = kwargs.pop(
1524
+ "tokenizer",
1525
+ None) # Pull this out first, we only use it for stopping criteria
1526
+ assistant_tokenizer = kwargs.pop(
1527
+ "assistant_tokenizer", None) # only used for assisted generation
1528
+
1529
+ generation_config, model_kwargs = self._prepare_generation_config(
1530
+ generation_config, **kwargs)
1531
+
1532
+ # GenerationConfig {
1533
+ # "eos_token_id": 50279,
1534
+ # "max_length": 2048,
1535
+ # "pad_token_id": 1
1536
+ # }
1537
+
1538
+ self._validate_model_kwargs(model_kwargs.copy())
1539
+ self._validate_assistant(assistant_model, tokenizer,
1540
+ assistant_tokenizer)
1541
+
1542
+ # 2. Set generation parameters if not already defined
1543
+ # Decide whether generation must be synchronized across GPUs (e.g. DeepSpeed ZeRO-3 or FSDP)
1544
+ if synced_gpus is None:
1545
+ synced_gpus = (
1546
+ is_deepspeed_zero3_enabled()
1547
+ or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
1548
+
1549
+ # Initialize the logits processors and stopping criteria
1550
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList(
1551
+ )  # rules that modify the output logits (e.g. no-repeat constraints, forced tokens)
1552
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList(
1553
+ )  # conditions that stop generation (e.g. reaching max length, detecting EOS)
1554
+
1555
+ accepts_attention_mask = "attention_mask" in set(
1556
+ inspect.signature(self.forward).parameters.keys()) # True
1557
+ requires_attention_mask = "encoder_outputs" not in model_kwargs # True
1558
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask",
1559
+ None) is not None # False
1560
+
1561
+ # pdb.set_trace()
1562
+
1563
+ # 3. Define model inputs
1564
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
1565
+ inputs, generation_config.bos_token_id, model_kwargs)
1566
+ batch_size = inputs_tensor.shape[0]
1567
+
1568
+ # inputs_tensor bs * input_length; model_input_name:"input_ids";
1569
+
1570
+ device = inputs_tensor.device
1571
+ self._prepare_special_tokens(generation_config,
1572
+ kwargs_has_attention_mask,
1573
+ device=device)
1574
+
1575
+ # decoder-only models must use left-padding for batched generation.
1576
+ # used for batched generation
1577
+ if not self.config.is_encoder_decoder and not is_torchdynamo_compiling(
1578
+ ):
1579
+ # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
1580
+ # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.
1581
+ if (generation_config._pad_token_tensor is not None
1582
+ and batch_size > 1 and len(inputs_tensor.shape) == 2
1583
+ and torch.sum(inputs_tensor[:, -1] ==
1584
+ generation_config._pad_token_tensor) > 0):
1585
+ logger.warning(
1586
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
1587
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
1588
+ )
1589
+ # pdb.set_trace()
1590
+ # 4. Define other model kwargs
1591
+ # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
1592
+ # generating the first new token or not, and we only want to use the embeddings for the first new token)
1593
+ if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
1594
+ generation_config.use_cache = True
1595
+ # The cache is needed to tell whether we are generating the first new token; subsequent tokens rely on the cache for speed.
1596
+
1597
+ # Build the attention mask
1598
+ if not kwargs_has_attention_mask and requires_attention_mask and accepts_attention_mask:
1599
+ model_kwargs[
1600
+ "attention_mask"] = self._prepare_attention_mask_for_generation(
1601
+ inputs_tensor, generation_config, model_kwargs)
1602
+
1603
+ # An attention mask was passed in; sanity-check it
1604
+ elif kwargs_has_attention_mask:
1605
+ # TODO (joao): generalize this check with other types of inputs
1606
+ if model_input_name == "input_ids" and len(
1607
+ model_kwargs["attention_mask"].shape) > 2:
1608
+ raise ValueError(
1609
+ "`attention_mask` passed to `generate` must be 2D.")
1610
+
1611
+ # encoder-decoder model setup
1612
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
1613
+ # if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
1614
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
1615
+ inputs_tensor, model_kwargs, model_input_name,
1616
+ generation_config)
1617
+
1618
+ # 5. Prepare `input_ids` which will be used for auto-regressive generation
1619
+ # encoder-decoder model
1620
+ if self.config.is_encoder_decoder:
1621
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
1622
+ batch_size=batch_size,
1623
+ model_input_name=model_input_name,
1624
+ model_kwargs=model_kwargs,
1625
+ decoder_start_token_id=generation_config.
1626
+ _decoder_start_token_tensor,
1627
+ device=inputs_tensor.device,
1628
+ )
1629
+ else:
1630
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop(
1631
+ "input_ids") # torch.Size([1, 25]) # torch.Size([1, 25])
1632
+
1633
+ # Heal incomplete tokens
1634
+ if generation_config.token_healing:
1635
+ input_ids = self.heal_tokens(input_ids, tokenizer)
1636
+
1637
+ # Streaming output
1638
+ if streamer is not None:
1639
+ streamer.put(input_ids.cpu())
1640
+
1641
+ # pdb.set_trace()
1642
+
1643
+ # 6. Prepare `max_length` depending on other stopping criteria.
1644
+ input_ids_length = input_ids.shape[-1]
1645
+ has_default_max_length = kwargs.get(
1646
+ "max_length") is None and generation_config.max_length is not None
1647
+ has_default_min_length = kwargs.get(
1648
+ "min_length") is None and generation_config.min_length is not None
1649
+ # min_length is 0
1650
+
1651
+ # Prepare the generation-length config
1652
+ generation_config = self._prepare_generated_length(
1653
+ generation_config=generation_config,
1654
+ has_default_max_length=has_default_max_length,
1655
+ has_default_min_length=has_default_min_length,
1656
+ model_input_name=model_input_name, # "input_ids"
1657
+ inputs_tensor=inputs_tensor,
1658
+ input_ids_length=input_ids_length,  # input length
1659
+ )
1660
+
1661
+ # If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
1662
+ # logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
1663
+ # dynamically overrides this value as it can need more than the last token logits
1664
+ if self._supports_logits_to_keep(
1665
+ ) and "logits_to_keep" not in model_kwargs:
1666
+ model_kwargs["logits_to_keep"] = 1
1667
+ # The model only keeps the logits of the last token rather than logits for every position, which greatly reduces memory. With a beam width of 5, assisted decoding would override this to logits_to_keep=5 so the logits of several candidate tokens are kept for multi-path exploration.
1668
+
1669
+ # Validate the generation length
1670
+ self._validate_generated_length(generation_config, input_ids_length,
1671
+ has_default_max_length)
1672
+
1673
+ # 7. Prepare the cache.
1674
+ # - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
1675
+ # - different models have a different cache name expected by the model (default = "past_key_values")
1676
+ # - `max_length`, prepared above, is used to determine the maximum cache length
1677
+ max_cache_length = generation_config.max_length - 1  # cache at most max_length - 1 tokens
1678
+
1679
+ # If the input is embeddings
1680
+ if (inputs_tensor.shape[1] != input_ids_length
1681
+ and model_input_name == "inputs_embeds"
1682
+ and not self.config.is_encoder_decoder):
1683
+ max_cache_length += inputs_tensor.shape[1]
1684
+ self._prepare_cache_for_generation(generation_config, model_kwargs,
1685
+ assistant_model, batch_size,
1686
+ max_cache_length, device)
1687
+
1688
+ # 8. determine generation mode
1689
+ generation_mode = generation_config.get_generation_mode(
1690
+ assistant_model)  # assisted decoding
1691
+
1692
+ if streamer is not None and (generation_config.num_beams > 1):
1693
+ raise ValueError(
1694
+ "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
1695
+ )
1696
+
1697
+ # device check
1698
+ if not is_torchdynamo_compiling(
1699
+ ) and self.device.type != input_ids.device.type:
1700
+ warnings.warn(
1701
+ "You are calling .generate() with the `input_ids` being on a device type different"
1702
+ f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
1703
+ f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
1704
+ " Please make sure that you have put `input_ids` to the"
1705
+ f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
1706
+ " running `.generate()`.",
1707
+ UserWarning,
1708
+ )
1709
+
1710
+ # pdb.set_trace()
1711
+
1712
+ # 9. prepare logits processors and stopping criteria
1713
+ prepared_logits_processor = self._get_logits_processor(
1714
+ generation_config=generation_config,
1715
+ input_ids_seq_length=input_ids_length,
1716
+ encoder_input_ids=inputs_tensor,
1717
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1718
+ logits_processor=logits_processor,
1719
+ device=inputs_tensor.device,
1720
+ model_kwargs=model_kwargs,
1721
+ negative_prompt_ids=negative_prompt_ids,
1722
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
1723
+ )
1724
+ prepared_stopping_criteria = self._get_stopping_criteria_for_seminat(
1725
+ generation_config=generation_config,
1726
+ stopping_criteria=stopping_criteria,
1727
+ tokenizer=tokenizer,
1728
+ **kwargs)
1729
+
1730
+ # Set model_kwargs `use_cache` so we can use it later in forward runs
1731
+ model_kwargs["use_cache"] = generation_config.use_cache
1732
+
1733
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
1734
+ input_ids=input_ids,
1735
+ expand_size=generation_config.num_return_sequences, # 1
1736
+ is_encoder_decoder=self.config.is_encoder_decoder, # false
1737
+ **model_kwargs,
1738
+ )
1739
+
1740
+
1741
+ # pdb.set_trace()
1742
+ result = self._sampleforseminat(
1743
+ input_ids,
1744
+ logits_processor=prepared_logits_processor,
1745
+ stopping_criteria=prepared_stopping_criteria,
1746
+ generation_config=generation_config,
1747
+ synced_gpus=synced_gpus,
1748
+ streamer=streamer,
1749
+ prefilling_length=prefilling_length,
1750
+ **model_kwargs,
1751
+ )
1752
+
1753
+ # Convert to legacy cache format if requested
1754
+ if (generation_config.return_legacy_cache is True
1755
+ and not is_torchdynamo_compiling()
1756
+ and hasattr(result, "past_key_values") and getattr(
1757
+ result.past_key_values, "to_legacy_cache") is not None):
1758
+ result.past_key_values = result.past_key_values.to_legacy_cache()
1759
+ return result
1760
+
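+ # Commented-out usage sketch (the names `model` and `tok` are placeholders,
+ # not defined in this file): `prefilling_length` controls the chunk size used
+ # to slice the prompt before chunk-wise (semi-NAT) decoding.
+ #
+ # inputs = tok("Hello", return_tensors="pt").to(model.device)
+ # out = model.generate(**inputs, max_new_tokens=64, do_sample=False,
+ #                      prefilling_length=4)
+ # print(tok.decode(out[0], skip_special_tokens=True))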
1761
+ def _get_stopping_criteria_for_seminat(
1762
+ self,
1763
+ generation_config: GenerationConfig,
1764
+ stopping_criteria: Optional[StoppingCriteriaList],
1765
+ tokenizer: Optional["PreTrainedTokenizerBase"] = None,
1766
+ **kwargs,
1767
+ ) -> StoppingCriteriaList:
1768
+ criteria = StoppingCriteriaList()
1769
+ if generation_config.max_length is not None:
1770
+ max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
1771
+ criteria.append(
1772
+ MaxLengthCriteria(
1773
+ max_length=generation_config.max_length,
1774
+ max_position_embeddings=max_position_embeddings,
1775
+ )
1776
+ )
1777
+ if generation_config.max_time is not None:
1778
+ criteria.append(MaxTimeCriteria(max_time=generation_config.max_time))
1779
+ if generation_config.stop_strings is not None:
1780
+ if tokenizer is None:
1781
+ raise ValueError(
1782
+ "There are one or more stop strings, either in the arguments to `generate` or in the "
1783
+ "model's generation config, but we could not locate a tokenizer. When generating with "
1784
+ "stop strings, you must pass the model's tokenizer to the `tokenizer` argument of `generate`."
1785
+ )
1786
+ criteria.append(StopStringCriteria(stop_strings=generation_config.stop_strings, tokenizer=tokenizer))
1787
+ if generation_config._eos_token_tensor is not None:
1788
+ criteria.append(EosTokenCriteriaForSemiNAT(eos_token_id=generation_config._eos_token_tensor))
1789
+ if (
1790
+ generation_config.is_assistant
1791
+ and generation_config.assistant_confidence_threshold is not None
1792
+ and generation_config.assistant_confidence_threshold > 0
1793
+ ):
1794
+ criteria.append(
1795
+ ConfidenceCriteria(assistant_confidence_threshold=generation_config.assistant_confidence_threshold)
1796
+ )
1797
+ criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
1798
+ return criteria
1799
+
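+ # Commented-out note (assumption: EosTokenCriteriaForSemiNAT is defined
+ # elsewhere in this file): unlike the stock EOS criterion, it is invoked with
+ # a `last_k` keyword in `_sampleforseminat` below, so an EOS appearing
+ # anywhere inside the k tokens of a chunk can mark the sequence as finished:
+ #
+ # finished = stopping_criteria(input_ids, scores, last_k=k)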
1800
+
1801
+ def _sampleforseminat(
1802
+ self,
1803
+ input_ids: torch.LongTensor,
1804
+ logits_processor: LogitsProcessorList,
1805
+ stopping_criteria: StoppingCriteriaList,
1806
+ generation_config: GenerationConfig,
1807
+ synced_gpus: bool,
1808
+ streamer: Optional["BaseStreamer"],
1809
+ prefilling_length: int,
1810
+ **model_kwargs,
1811
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
1812
+
1813
+ # init values
1814
+ pad_token_id = generation_config._pad_token_tensor  # pad token id
1815
+ output_attentions = generation_config.output_attentions  # whether to output attention weights
1816
+ output_hidden_states = generation_config.output_hidden_states  # whether to output hidden states
1817
+ output_scores = generation_config.output_scores  # whether to output scores
1818
+ output_logits = generation_config.output_logits  # whether to output raw logits
1819
+ return_dict_in_generate = generation_config.return_dict_in_generate  # whether to return a structured dict
1820
+ max_length = generation_config.max_length  # maximum generation length
1821
+ has_eos_stopping_criteria = any(
1822
+ hasattr(criteria, "eos_token_id")
1823
+ for criteria in stopping_criteria)  # check whether any stopping criterion uses an EOS token
1824
+ do_sample = generation_config.do_sample  # whether to sample
1825
+
1826
+ # Initialize the result containers
1827
+ # init attention / hidden states / scores tuples
1828
+ scores = () if (return_dict_in_generate and output_scores) else None
1829
+ raw_logits = () if (return_dict_in_generate
1830
+ and output_logits) else None
1831
+ decoder_attentions = () if (return_dict_in_generate
1832
+ and output_attentions) else None
1833
+ cross_attentions = () if (return_dict_in_generate
1834
+ and output_attentions) else None
1835
+ decoder_hidden_states = () if (return_dict_in_generate
1836
+ and output_hidden_states) else None
1837
+
1838
+ # # special handling for encoder-decoder models; can be ignored here
1839
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
1840
+ if return_dict_in_generate and self.config.is_encoder_decoder:
1841
+ encoder_attentions = model_kwargs["encoder_outputs"].get(
1842
+ "attentions") if output_attentions else None
1843
+ encoder_hidden_states = (
1844
+ model_kwargs["encoder_outputs"].get("hidden_states")
1845
+ if output_hidden_states else None)
1846
+
1847
+ # pdb.set_trace()
1848
+
1849
+ # Initialize sequence tracking
1850
+ # keep track of which sequences are already finished
1851
+ batch_size, cur_len = input_ids.shape
1852
+ this_peer_finished = False
1853
+ unfinished_sequences = torch.ones(
1854
+ batch_size, dtype=torch.long,
1855
+ device=input_ids.device)  # flag marking unfinished sequences, torch.Size([1]) here
1856
+ model_kwargs = self._get_initial_cache_position(
1857
+ input_ids, model_kwargs)  # initialize the cache position
1858
+
1859
+ model_forward = self.__call__  # grab the forward function
1860
+ ############ switch to the new forward
1861
+ # model_forward = self.forward
1862
+
1863
+ if isinstance(model_kwargs.get("past_key_values"), Cache):
1864
+ is_compileable = model_kwargs[
1865
+ "past_key_values"].is_compileable and self._supports_static_cache #编译优化
1866
+ is_compileable = is_compileable and not self.generation_config.disable_compile
1867
+ if is_compileable and (
1868
+ self.device.type == "cuda"
1869
+ or generation_config.compile_config._compile_all_devices):
1870
+ os.environ["TOKENIZERS_PARALLELISM"] = "0"
1871
+ model_forward = self.get_compiled_call(
1872
+ generation_config.compile_config)
1873
+
1874
+ ############ prefilling ############
1875
+ start = prefilling_length-1
1876
+ chunk_length = prefilling_length
1877
+
1878
+ s_pos = [start]
1879
+ while True:
1880
+ start += chunk_length
1881
+ if start >= input_ids.shape[1] - 1:
1882
+ s_pos.append(input_ids.shape[1] - 1)
1883
+ break
1884
+ else:
1885
+ s_pos.append(start)
1886
+
1887
+ # pdb.set_trace()
1888
+ slice_pos = torch.tensor(s_pos + [-1] *
1889
+ (max_length - len(s_pos))).unsqueeze(0).to(
1890
+ input_ids.device)
1891
+
1892
+ model_kwargs['slice_pos'] = slice_pos
1893
+ count = (slice_pos != -1).sum().item()
1894
+ new_cache_position = torch.arange(0, count).to(input_ids.device)
1895
+ model_kwargs[
1896
+ 'cache_position'] = new_cache_position  # update the cache position
1897
+
1898
+ # pdb.set_trace()
1899
+ ############ prefilling ############
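+ # Commented-out worked example (made-up numbers) of the slice positions built
+ # above: with prefilling_length = 4 and a 10-token prompt, start begins at 3,
+ # the loop appends 7 and then clamps to the last prompt index 9, so
+ # s_pos == [3, 7, 9]; slice_pos is s_pos padded with -1 up to max_length and
+ # cache_position becomes arange(3) because three slice positions are filled.
+ #
+ # prefilling_length, prompt_len = 4, 10
+ # start, s_pos = prefilling_length - 1, [prefilling_length - 1]
+ # while True:
+ #     start += prefilling_length
+ #     s_pos.append(min(start, prompt_len - 1))
+ #     if start >= prompt_len - 1:
+ #         break
+ # assert s_pos == [3, 7, 9]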
1900
+
1901
+ is_prefill = True
1902
+ while self._has_unfinished_sequences(
1903
+ this_peer_finished,
1904
+ synced_gpus,
1905
+ device=input_ids.device,
1906
+ cur_len=cur_len,
1907
+ max_length=max_length):  # loop until all sequences are finished
1908
+ # prepare model inputs
1909
+
1910
+ # pdb.set_trace()
1911
+
1912
+ # model_kwargs.keys(): dict_keys(['attention_mask', 'logits_to_keep', 'past_key_values', 'use_cache', 'cache_position', 'nar_kv_cache', 'slice_pos'])
1913
+ model_inputs = self.prepare_inputs_for_generation(  # adds position_ids and input_ids
1914
+ input_ids, **model_kwargs
1915
+ ) #dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
1916
+ # pdb.set_trace()
1917
+
1918
+ # position_ids = torch.arange(
1919
+ # input_ids.shape[1], device=input_ids.device).unsqueeze(0).to(input_ids.device)
1920
+ # model_inputs.update({"position_ids": position_ids})
1921
+
1922
+ model_inputs.update({"input_ids": input_ids})
1923
+
1924
+ # prepare variable output controls (note: some models won't accept all output controls)
1925
+ model_inputs.update({"output_attentions": output_attentions}
1926
+ if output_attentions else {})
1927
+ model_inputs.update({"output_hidden_states": output_hidden_states}
1928
+ if output_hidden_states else {})
1929
+
1930
+ if is_prefill:
1931
+ # pdb.set_trace()
1932
+ # outputs = self(**model_inputs, return_dict=True)
1933
+ # dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
1934
+ outputs = self.forward(**model_inputs, return_dict=True)
1935
+ is_prefill = False
1936
+ else:
1937
+ # pdb.set_trace()
1938
+ outputs = model_forward(**model_inputs, return_dict=True)
1939
+
1940
+ # pdb.set_trace()
1941
+
1942
+ ################ seminat ###########################
1943
+ # model_kwargs['slice_pos'] = outputs.slice_pos
1944
+ ################ seminat ###########################
1945
+
1946
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
1947
+ model_kwargs = self._update_model_kwargs_for_generation_for_seminat(
1948
+ outputs,
1949
+ model_kwargs,
1950
+ is_encoder_decoder=self.config.is_encoder_decoder,
1951
+ num_new_tokens=outputs.logits.size(1))
1952
+ if synced_gpus and this_peer_finished:
1953
+ continue
1954
+
1955
+ # pdb.set_trace()
1956
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
1957
+ # (the clone itself is always small)
1958
+
1959
+ # next_token_logits = outputs.logits[:, -1, :].clone().float()
1960
+ next_token_logits = outputs.logits[:, :, :].clone().float(
1961
+ )  # k new tokens are produced in this step
1962
+
1963
+ next_token_logits = next_token_logits.to(input_ids.device)
1964
+
1965
+ # pre-process distribution
1966
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1967
+
1968
+ # token selection
1969
+ if do_sample:
1970
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1971
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
1972
+ next_tokens = torch.multinomial(probs.view(-1, probs.size(-1)),
1973
+ num_samples=1).view(probs.size(0), probs.size(1))
1974
+ else:
1975
+ next_tokens = torch.argmax(
1976
+ next_token_scores,
1977
+ dim=-1) # tensor([9281], device='cuda:0') token id
1978
+
1979
+ # pdb.set_trace()
1980
+ # update slice_pos
1981
+ count = (model_kwargs['slice_pos'] != -1).sum().item()
1982
+ model_kwargs['slice_pos'][:, count] = model_kwargs[
1983
+ 'slice_pos'][:, count - 1] + outputs.logits.size(1)
1984
+
1985
+ # pdb.set_trace()
1986
+
1987
+ # finished sentences should have their next token be a padding token
1988
+ if has_eos_stopping_criteria:
1989
+ next_tokens = next_tokens * unfinished_sequences[:, None] + pad_token_id * (
1990
+ 1 - unfinished_sequences[:, None]
1991
+ )  # once a sequence is finished, unfinished_sequences is 0, so the remainder is filled with padding
1992
+
1993
+ # pdb.set_trace()
1994
+ # update generated ids, model inputs, and length for next step
1995
+ # input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1996
+ input_ids = torch.cat([input_ids, next_tokens], dim=-1)
1997
+ if streamer is not None:
1998
+ streamer.put(next_tokens.cpu())
1999
+
2000
+ # update the finished status
2001
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(
2002
+ input_ids, scores, last_k=next_tokens.size(1))
2003
+ this_peer_finished = unfinished_sequences.max() == 0
2004
+ cur_len += outputs.logits.size(1)  # advance the length by the chunk size
2005
+
2006
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
2007
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
2008
+ del outputs
2009
+
2010
+ if streamer is not None:
2011
+ streamer.end()
2012
+
2013
+ if return_dict_in_generate:
2014
+ if self.config.is_encoder_decoder:
2015
+ return GenerateEncoderDecoderOutput(
2016
+ sequences=input_ids,
2017
+ scores=scores,
2018
+ logits=raw_logits,
2019
+ encoder_attentions=encoder_attentions,
2020
+ encoder_hidden_states=encoder_hidden_states,
2021
+ decoder_attentions=decoder_attentions,
2022
+ cross_attentions=cross_attentions,
2023
+ decoder_hidden_states=decoder_hidden_states,
2024
+ past_key_values=model_kwargs.get("past_key_values"),
2025
+ )
2026
+ else:
2027
+ return GenerateDecoderOnlyOutput(
2028
+ sequences=input_ids,
2029
+ scores=scores,
2030
+ logits=raw_logits,
2031
+ attentions=decoder_attentions,
2032
+ hidden_states=decoder_hidden_states,
2033
+ past_key_values=model_kwargs.get("past_key_values"),
2034
+ )
2035
+ else:
2036
+ return input_ids
2037
+
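+ # Commented-out trace (illustration only) of one decoding step above: the
+ # forward pass returns logits of shape (B, k, V) for a chunk of k tokens,
+ # greedy selection takes an argmax over V at each of the k positions, all k
+ # tokens are appended to `input_ids` at once, slice_pos gains one new cut at
+ # previous_cut + k, and cur_len advances by k rather than by 1.
+ #
+ # import torch
+ # k, V = 3, 32
+ # next_token_scores = torch.randn(1, k, V)
+ # next_tokens = next_token_scores.argmax(dim=-1)   # shape (1, k): k tokens in one step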
2038
+ def _update_model_kwargs_for_generation_for_seminat(
2039
+ self,
2040
+ outputs: ModelOutput,
2041
+ model_kwargs: Dict[str, Any],
2042
+ is_encoder_decoder: bool = False,
2043
+ num_new_tokens: int = 1,
2044
+ ) -> Dict[str, Any]:
2045
+ ALL_CACHE_NAMES = [
2046
+ "past_key_values", # default
2047
+ "cache_params", # mamba-based models
2048
+ "state", # rwkv
2049
+ "mems", # xlnet
2050
+ "past_buckets_states", # reformer
2051
+ ]
2052
+ # update past_key_values keeping its naming used in model code
2053
+ for possible_cache_name in ALL_CACHE_NAMES:
2054
+ if possible_cache_name in outputs:
2055
+ # TODO (joao): remove output/input mismatch when these old models (xlnet, reformer) are deprecated
2056
+ if possible_cache_name in ("past_buckets_states", "mems"):
2057
+ cache_name = "past_key_values"
2058
+ else:
2059
+ cache_name = possible_cache_name
2060
+ model_kwargs[cache_name] = getattr(outputs,
2061
+ possible_cache_name)
2062
+ break
2063
+
2064
+ # pdb.set_trace()
2065
+
2066
+ # update token_type_ids with last value
2067
+ # false
2068
+ if "token_type_ids" in model_kwargs:
2069
+ token_type_ids = model_kwargs["token_type_ids"]
2070
+ model_kwargs["token_type_ids"] = torch.cat(
2071
+ [token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
2072
+
2073
+ if not is_encoder_decoder:
2074
+ # update attention mask
2075
+ # this is the important part
2076
+ # pdb.set_trace()
2077
+ if "attention_mask" in model_kwargs:
2078
+ attention_mask = model_kwargs["attention_mask"]
2079
+ model_kwargs["attention_mask"] = torch.cat(
2080
+ [
2081
+ attention_mask,
2082
+ attention_mask.new_ones(
2083
+ (attention_mask.shape[0], num_new_tokens
2084
+ ))  # 1 -> num_new_tokens: extend the attention mask by several tokens at once
2085
+ ],
2086
+ dim=-1)
2087
+ else:
2088
+ # update decoder attention mask
2089
+ if "decoder_attention_mask" in model_kwargs:
2090
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
2091
+ model_kwargs["decoder_attention_mask"] = torch.cat(
2092
+ [
2093
+ decoder_attention_mask,
2094
+ decoder_attention_mask.new_ones(
2095
+ (decoder_attention_mask.shape[0], 1))
2096
+ ],
2097
+ dim=-1,
2098
+ )
2099
+
2100
+ # pdb.set_trace()
2101
+ if model_kwargs.get("use_cache", True):
2102
+ # model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + num_new_tokens
2103
+ model_kwargs["cache_position"] = torch.tensor([
2104
+ model_kwargs["cache_position"][-1:].item() + 1
2105
+ ]).to(model_kwargs["cache_position"].device)
2106
+ else:
2107
+ past_positions = model_kwargs.pop("cache_position")
2108
+ new_positions = torch.arange(
2109
+ past_positions[-1] + 1,
2110
+ past_positions[-1] + num_new_tokens + 1,
2111
+ dtype=past_positions.dtype).to(past_positions.device)
2112
+ model_kwargs["cache_position"] = torch.cat(
2113
+ (past_positions, new_positions))
2114
+ return model_kwargs
2115
+
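+ # Commented-out sketch (illustration only) of the kwarg update above: the
+ # attention mask grows by `num_new_tokens` columns per step instead of 1,
+ # matching the k tokens emitted by each chunk-wise forward pass.
+ #
+ # import torch
+ # attention_mask, num_new_tokens = torch.ones(1, 10), 3
+ # attention_mask = torch.cat(
+ #     [attention_mask, attention_mask.new_ones((1, num_new_tokens))], dim=-1)
+ # assert attention_mask.shape == (1, 13)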
2116
+ class AbsolutePositionalEncoding(nn.Module):
2117
+ def __init__(self, hidden_size: int, max_len: int = 2048):
2118
+ """
2119
+ Initialize the absolute (sinusoidal) positional encoding.
2120
+
2121
+ Args:
2122
+ hidden_size (int): hidden dimension
2123
+ max_len (int): maximum sequence length
2124
+ """
2125
+ super().__init__()
2126
+
2127
+ # Build the positional-encoding table
2128
+ pe = torch.zeros(max_len, hidden_size)
2129
+ position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
2130
+ div_term = torch.exp(torch.arange(0, hidden_size, 2).float() * (-math.log(10000.0) / hidden_size))
2131
+
2132
+ # Compute the encoding with sin and cos
2133
+ pe[:, 0::2] = torch.sin(position * div_term)
2134
+ pe[:, 1::2] = torch.cos(position * div_term)
2135
+ pe = pe.unsqueeze(0) # [1, max_len, hidden_size]
2136
+
2137
+ # Register as a buffer (not trained)
2138
+ self.register_buffer('pe', pe)
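+ # The table built above is the standard sinusoidal encoding
+ # (Vaswani et al., 2017):
+ #   PE[pos, 2i]   = sin(pos / 10000^(2i / hidden_size))
+ #   PE[pos, 2i+1] = cos(pos / 10000^(2i / hidden_size))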
2139
+
2140
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
2141
+ """
2142
+ Add the positional encoding to the input tensor.
2143
+
2144
+ Args:
2145
+ x (torch.Tensor): input tensor of shape (batch_size, seq_len, hidden_size)
2146
+
2147
+ Returns:
2148
+ torch.Tensor: the input with positional encodings added, same shape as the input
2149
+ """
2150
+ seq_len = x.size(1)
2151
+
2152
+
2153
+ pos = x + self.pe[:, :seq_len]
2154
+
2155
+ # pdb.set_trace()
2156
+ return pos
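+ # Commented-out usage sketch (illustration only; assumes the class is
+ # reachable at this scope): the sinusoidal table is a fixed buffer, so one
+ # instance can be reused for any sequence length up to max_len.
+ #
+ # import torch
+ # pe = AbsolutePositionalEncoding(hidden_size=64, max_len=128)
+ # x = torch.zeros(2, 10, 64)     # (batch, seq_len, hidden)
+ # y = pe(x)                      # adds the first 10 rows of the table to x
+ # assert y.shape == (2, 10, 64)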