zaydzuhri committed on
Commit e49db55 · verified · 1 Parent(s): ee2cdd2

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. fla/models/gated_deltaproduct/__init__.py +14 -0
  2. fla/models/gated_deltaproduct/__pycache__/__init__.cpython-311.pyc +0 -0
  3. fla/models/gated_deltaproduct/__pycache__/configuration_gated_deltaproduct.cpython-311.pyc +0 -0
  4. fla/models/gated_deltaproduct/__pycache__/modeling_gated_deltaproduct.cpython-311.pyc +0 -0
  5. fla/models/gated_deltaproduct/configuration_gated_deltaproduct.py +90 -0
  6. fla/models/gated_deltaproduct/modeling_gated_deltaproduct.py +520 -0
  7. fla/models/gsa/__init__.py +13 -0
  8. fla/models/gsa/__pycache__/__init__.cpython-311.pyc +0 -0
  9. fla/models/gsa/__pycache__/configuration_gsa.cpython-311.pyc +0 -0
  10. fla/models/gsa/__pycache__/modeling_gsa.cpython-311.pyc +0 -0
  11. fla/models/gsa/configuration_gsa.py +97 -0
  12. fla/models/gsa/modeling_gsa.py +420 -0
  13. fla/models/mamba/__pycache__/configuration_mamba.cpython-311.pyc +0 -0
  14. fla/models/mamba/__pycache__/modeling_mamba.cpython-311.pyc +0 -0
  15. fla/modules/__init__.py +29 -0
  16. fla/modules/__pycache__/__init__.cpython-311.pyc +0 -0
  17. fla/modules/__pycache__/activations.cpython-311.pyc +0 -0
  18. fla/modules/__pycache__/convolution.cpython-311.pyc +0 -0
  19. fla/modules/__pycache__/feature_map.cpython-311.pyc +0 -0
  20. fla/modules/__pycache__/fused_bitlinear.cpython-311.pyc +0 -0
  21. fla/modules/__pycache__/fused_cross_entropy.cpython-311.pyc +0 -0
  22. fla/modules/__pycache__/fused_kl_div.cpython-311.pyc +0 -0
  23. fla/modules/__pycache__/fused_linear_cross_entropy.cpython-311.pyc +0 -0
  24. fla/modules/__pycache__/fused_norm_gate.cpython-311.pyc +0 -0
  25. fla/modules/__pycache__/l2norm.cpython-311.pyc +0 -0
  26. fla/modules/__pycache__/layernorm.cpython-311.pyc +0 -0
  27. fla/modules/__pycache__/layernorm_gated.cpython-311.pyc +0 -0
  28. fla/modules/__pycache__/mlp.cpython-311.pyc +0 -0
  29. fla/modules/__pycache__/rotary.cpython-311.pyc +0 -0
  30. fla/modules/activations.py +471 -0
  31. fla/modules/convolution.py +434 -0
  32. fla/modules/feature_map.py +300 -0
  33. fla/modules/fused_bitlinear.py +638 -0
  34. fla/modules/fused_cross_entropy.py +419 -0
  35. fla/modules/fused_kl_div.py +323 -0
  36. fla/modules/fused_linear_cross_entropy.py +570 -0
  37. fla/modules/fused_norm_gate.py +995 -0
  38. fla/modules/grpo.py +396 -0
  39. fla/modules/l2norm.py +176 -0
  40. fla/modules/layernorm.py +1196 -0
  41. fla/modules/layernorm_gated.py +528 -0
  42. fla/modules/mlp.py +127 -0
  43. fla/modules/parallel.py +37 -0
  44. fla/modules/rotary.py +512 -0
  45. torchtitan/__init__.py +15 -0
  46. torchtitan/config_manager.py +947 -0
  47. torchtitan/experiments/README.md +20 -0
  48. torchtitan/experiments/__init__.py +8 -0
  49. torchtitan/experiments/llama4/README.md +29 -0
  50. torchtitan/experiments/llama4/__init__.py +70 -0
fla/models/gated_deltaproduct/__init__.py ADDED
@@ -0,0 +1,14 @@
from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.gated_deltaproduct.configuration_gated_deltaproduct import GatedDeltaProductConfig
from fla.models.gated_deltaproduct.modeling_gated_deltaproduct import GatedDeltaProductForCausalLM, GatedDeltaProductModel

AutoConfig.register(GatedDeltaProductConfig.model_type, GatedDeltaProductConfig)
AutoModel.register(GatedDeltaProductConfig, GatedDeltaProductModel)
AutoModelForCausalLM.register(GatedDeltaProductConfig, GatedDeltaProductForCausalLM)

__all__ = [
    "GatedDeltaProductConfig",
    "GatedDeltaProductForCausalLM",
    "GatedDeltaProductModel",
]
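Because this `__init__.py` registers the config and model classes with the transformers Auto* registries at import time, checkpoints of this model type can be resolved through the usual Auto API once `fla.models` has been imported. A minimal sketch, not part of the commit; the checkpoint path is a placeholder:

# Sketch: importing fla.models runs the registrations above, after which the
# Auto API can resolve the custom "gated_deltaproduct" model type.
import fla.models  # noqa: F401  (side effect: AutoConfig/AutoModel registration)
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.for_model("gated_deltaproduct")                  # -> GatedDeltaProductConfig
model = AutoModelForCausalLM.from_pretrained("path/to/checkpoint")   # placeholder path, resolves to GatedDeltaProductForCausalLM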
fla/models/gated_deltaproduct/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (840 Bytes). View file
 
fla/models/gated_deltaproduct/__pycache__/configuration_gated_deltaproduct.cpython-311.pyc ADDED
Binary file (3.75 kB). View file
 
fla/models/gated_deltaproduct/__pycache__/modeling_gated_deltaproduct.cpython-311.pyc ADDED
Binary file (21.5 kB). View file
 
fla/models/gated_deltaproduct/configuration_gated_deltaproduct.py ADDED
@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class GatedDeltaProductConfig(PretrainedConfig):
    model_type = "gated_deltaproduct"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        attn_mode: str = "chunk",
        hidden_size: int = 2048,
        expand_v: int = 2,
        use_gate: bool = True,
        use_short_conv: bool = True,
        conv_size: int = 4,
        head_dim: int = 256,
        num_heads: int = 6,
        max_position_embeddings: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        num_hidden_layers: int = 21,
        norm_first: bool = False,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int | None = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        use_forget_gate: bool = False,  # when True: Gated DeltaProduct, when False: DeltaProduct
        allow_neg_eigval: bool = False,  # when True: (Gated) DeltaProduct [-1, 1], when False: (Gated) DeltaProduct [0, 1]
        num_householder: int = 1,
        **kwargs,
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_v = expand_v
        self.use_gate = use_gate
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.head_dim = head_dim
        self.num_heads = num_heads
        self.max_position_embeddings = max_position_embeddings

        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_hidden_layers = num_hidden_layers
        self.norm_first = norm_first
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        # DeltaProduct specific
        self.allow_neg_eigval = allow_neg_eigval
        self.num_householder = num_householder
        self.use_forget_gate = use_forget_gate

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if "layers" not in attn:
                raise ValueError(
                    "Layer indices must be provided to initialize hybrid attention layers"
                )
            if "num_heads" not in attn:
                raise ValueError(
                    "Number of heads must be provided to initialize hybrid attention layers"
                )
            attn["num_kv_heads"] = attn.get("num_kv_heads", attn["num_heads"])
            attn["window_size"] = attn.get("window_size", None)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
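For orientation, a minimal sketch of instantiating this config; the sizes below are illustrative and not taken from this commit:

# Illustrative sketch (sizes chosen for brevity, not values from the commit).
# num_householder > 1 applies several Householder update steps per token;
# allow_neg_eigval widens the per-step eigenvalue range from [0, 1] to [-1, 1];
# use_forget_gate selects Gated DeltaProduct (True) vs. plain DeltaProduct (False).
from fla.models.gated_deltaproduct import GatedDeltaProductConfig

config = GatedDeltaProductConfig(
    hidden_size=512,
    num_hidden_layers=4,
    num_heads=4,
    head_dim=128,
    num_householder=2,
    allow_neg_eigval=True,
    use_forget_gate=True,
    vocab_size=32000,
)
config.save_pretrained("gated_deltaproduct_config")  # writes config.json with model_type "gated_deltaproduct"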
fla/models/gated_deltaproduct/modeling_gated_deltaproduct.py ADDED
@@ -0,0 +1,520 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.gated_deltaproduct import GatedDeltaProduct
from fla.models.gated_deltaproduct.configuration_gated_deltaproduct import GatedDeltaProductConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, RMSNorm
from fla.modules.activations import swiglu_linear
from fla.modules.layernorm import rms_norm_linear

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class GatedDeltaNetMLP(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        norm_first: bool = True,
        norm_eps: float = 1e-5,
    ) -> GatedDeltaNetMLP:
        super().__init__()

        self.hidden_size = hidden_size
        # the final number of params is `hidden_ratio * hidden_size^2`
        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.norm_first = norm_first

        if norm_first:
            self.norm = RMSNorm(hidden_size=hidden_size, eps=norm_eps)

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(
        self,
        x: torch.Tensor,
        **kwargs: Unpack[Dict],
    ) -> torch.Tensor:
        if self.norm_first:
            x = rms_norm_linear(
                x,
                self.norm.weight,
                self.norm.bias,
                self.gate_proj.weight,
                self.gate_proj.bias,
            )
        else:
            x = self.gate_proj(x)
        gate, y = x.chunk(2, -1)
        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)


class GatedDeltaProductBlock(nn.Module):
    def __init__(self, config: GatedDeltaProductConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        if not config.norm_first:
            self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn["layers"]:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn["num_heads"],
                num_kv_heads=config.attn["num_kv_heads"],
                window_size=config.attn["window_size"],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx,
            )
        else:
            self.attn = GatedDeltaProduct(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                expand_v=config.expand_v,
                head_dim=config.head_dim,
                num_heads=config.num_heads,
                use_gate=config.use_gate,
                use_forget_gate=config.use_forget_gate,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                norm_first=config.norm_first,
                norm_eps=config.norm_eps,
                allow_neg_eigval=config.allow_neg_eigval,
                num_householder=config.num_householder,
                layer_idx=layer_idx,
                # NOTE: `use_beta_conv` is not declared in GatedDeltaProductConfig.__init__ above;
                # it has to be supplied as an extra config kwarg (PretrainedConfig keeps unknown
                # kwargs as attributes), e.g. via the checkpoint's config.json.
                use_beta_conv=config.use_beta_conv
            )
        if not config.norm_first:
            self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.mlp = GatedDeltaNetMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            norm_first=config.norm_first,
            norm_eps=config.norm_eps,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        if hasattr(self, "attn_norm"):
            hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs,
        )
        if hasattr(self, "mlp_norm"):
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class GatedDeltaProductPreTrainedModel(PreTrainedModel):
    config_class = GatedDeltaProductConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ["GatedDeltaNetBlock"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth.
            #   > Scale the weights of residual layers at initialization by a factor of 1/sqrt(N) where N is the # of
            #   > residual layers.
            #   > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["o_proj.weight", "down_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following PyTorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class GatedDeltaProductModel(GatedDeltaProductPreTrainedModel):
    def __init__(self, config: GatedDeltaProductConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GatedDeltaProductBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn(
                "`GatedDeltaNetModel` does not `output_attentions` now, setting it to `False`.",
                stacklevel=2,
            )
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs,
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs,
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                i
                for i in [hidden_states, past_key_values, all_hidden_states, all_attns]
                if i is not None
            )
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns,
        )


class GatedDeltaProductForCausalLM(GatedDeltaProductPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = GatedDeltaProductModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if "past_key_values" in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        num_logits_to_keep: Optional[int] = None,
        logits_to_keep: Optional[int] = None,
        **kwargs,
    ):
        # only keep the last token of `input_ids` if `past_key_values` is passed along and is not empty
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {"input_ids": input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
                "num_logits_to_keep": num_logits_to_keep,
            }
        )
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        num_logits_to_keep: Optional[int] = 0,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        num_logits_to_keep = 0 if num_logits_to_keep is None else num_logits_to_keep
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        kwargs.pop("num_items_in_batch", None)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )
        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if self.config.fuse_cross_entropy:
                if fuse_linear_and_cross_entropy:
                    loss_fct = FusedLinearCrossEntropyLoss()
                else:
                    loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
            else:
                loss_fct = nn.CrossEntropyLoss()
            # Enable model parallelism
            labels = labels.to(hidden_states.device)
            labels = torch.cat(
                (labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)),
                1,
            )
            if fuse_linear_and_cross_entropy:
                loss = loss_fct(
                    hidden_states.view(-1, self.config.hidden_size),
                    labels.view(-1),
                    self.lm_head.weight,
                    self.lm_head.bias,
                )
            else:
                loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss, *output) if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
fla/models/gsa/__init__.py ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.gsa.configuration_gsa import GSAConfig
from fla.models.gsa.modeling_gsa import GSAForCausalLM, GSAModel

AutoConfig.register(GSAConfig.model_type, GSAConfig)
AutoModel.register(GSAConfig, GSAModel)
AutoModelForCausalLM.register(GSAConfig, GSAForCausalLM)


__all__ = ['GSAConfig', 'GSAForCausalLM', 'GSAModel']
fla/models/gsa/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (716 Bytes). View file
 
fla/models/gsa/__pycache__/configuration_gsa.cpython-311.pyc ADDED
Binary file (4.27 kB). View file
 
fla/models/gsa/__pycache__/modeling_gsa.cpython-311.pyc ADDED
Binary file (19.5 kB). View file
 
fla/models/gsa/configuration_gsa.py ADDED
@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class GSAConfig(PretrainedConfig):

    model_type = 'gsa'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        hidden_size: int = 2048,
        gate_logit_normalizer: Optional[int] = 8,
        clamp_min: Optional[float] = None,
        clamp_max: Optional[float] = None,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        num_kv_heads: Optional[int] = None,
        num_slots: Optional[int] = 64,
        use_short_conv: bool = False,
        conv_size: int = 4,
        exapnd_k: float = 1,  # NOTE: misspelling of `expand_k` kept from the source; stored as `self.expand_k` below
        exapnd_v: float = 1,  # NOTE: misspelling of `expand_v` kept from the source; stored as `self.expand_v` below
        feature_map: str = 'swish',
        use_output_gate: bool = False,
        use_norm: bool = True,
        max_position_embeddings: int = 2048,
        hidden_act: str = "swish",
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        initializer_range: float = 0.006,
        tie_word_embeddings: bool = False,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.hidden_size = hidden_size
        self.gate_logit_normalizer = gate_logit_normalizer
        self.clamp_min = clamp_min
        self.clamp_max = clamp_max
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.num_slots = num_slots
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.expand_k = exapnd_k
        self.expand_v = exapnd_v
        self.feature_map = feature_map
        self.use_output_gate = use_output_gate
        self.use_norm = use_norm
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
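The `attn` dictionary turns selected layers into standard softmax-attention layers while the rest use GatedSlotAttention; the validation above requires 'layers' and 'num_heads' and fills the remaining keys with defaults. A small sketch, with illustrative sizes not taken from this commit:

# Sketch: hybrid GSA config where layers 2 and 5 use softmax attention.
# Keys not given in `attn` (num_kv_heads, qkv_bias, window_size, rope_theta)
# are filled with the defaults shown in __init__ above.
from fla.models.gsa import GSAConfig

config = GSAConfig(
    hidden_size=1024,
    num_hidden_layers=12,
    num_heads=4,
    num_slots=64,
    attn={"layers": [2, 5], "num_heads": 8},
)
print(config.attn)  # -> also contains num_kv_heads=8, qkv_bias=False, window_size=None, rope_theta=10000.0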
fla/models/gsa/modeling_gsa.py ADDED
@@ -0,0 +1,420 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.gsa import GatedSlotAttention
from fla.models.gsa.configuration_gsa import GSAConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as GSAMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class GSABlock(nn.Module):
    def __init__(self, config: GSAConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = GatedSlotAttention(
                hidden_size=config.hidden_size,
                expand_k=config.expand_k,
                expand_v=config.expand_v,
                num_heads=config.num_heads,
                num_kv_heads=config.num_kv_heads,
                num_slots=config.num_slots,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                feature_map=config.feature_map,
                use_output_gate=config.use_output_gate,
                use_norm=config.use_norm,
                gate_fn=config.hidden_act,
                gate_logit_normalizer=config.gate_logit_normalizer,
                elementwise_affine=config.elementwise_affine,
                norm_eps=config.norm_eps,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx
            )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = GSAMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class GSAPreTrainedModel(PreTrainedModel):

    config_class = GSAConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['GSABlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth.
            #   > Scale the weights of residual layers at initialization by a factor of 1/sqrt(N) where N is the # of
            #   > residual layers.
            #   > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following PyTorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class GSAModel(GSAPreTrainedModel):

    def __init__(self, config: GSAConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([GSABlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`GSAModel` does not `output_attentions` now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class GSAForCausalLM(GSAPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):

        super().__init__(config)
        self.model = GSAModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            # Enable model parallelism
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
fla/models/mamba/__pycache__/configuration_mamba.cpython-311.pyc ADDED
Binary file (7.33 kB). View file
 
fla/models/mamba/__pycache__/modeling_mamba.cpython-311.pyc ADDED
Binary file (42.9 kB). View file
 
fla/modules/__init__.py ADDED
@@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-

from fla.modules.convolution import ImplicitLongConvolution, LongConvolution, ShortConvolution
from fla.modules.fused_bitlinear import BitLinear, FusedBitLinear
from fla.modules.fused_cross_entropy import FusedCrossEntropyLoss
from fla.modules.fused_kl_div import FusedKLDivLoss
from fla.modules.fused_linear_cross_entropy import FusedLinearCrossEntropyLoss
from fla.modules.fused_norm_gate import (
    FusedLayerNormGated,
    FusedLayerNormSwishGate,
    FusedLayerNormSwishGateLinear,
    FusedRMSNormGated,
    FusedRMSNormSwishGate,
    FusedRMSNormSwishGateLinear
)
from fla.modules.layernorm import GroupNorm, GroupNormLinear, LayerNorm, LayerNormLinear, RMSNorm, RMSNormLinear
from fla.modules.mlp import GatedMLP
from fla.modules.rotary import RotaryEmbedding

__all__ = [
    'ImplicitLongConvolution', 'LongConvolution', 'ShortConvolution',
    'BitLinear', 'FusedBitLinear',
    'FusedCrossEntropyLoss', 'FusedLinearCrossEntropyLoss', 'FusedKLDivLoss',
    'GroupNorm', 'GroupNormLinear', 'LayerNorm', 'LayerNormLinear', 'RMSNorm', 'RMSNormLinear',
    'FusedLayerNormGated', 'FusedLayerNormSwishGate', 'FusedLayerNormSwishGateLinear',
    'FusedRMSNormGated', 'FusedRMSNormSwishGate', 'FusedRMSNormSwishGateLinear',
    'GatedMLP',
    'RotaryEmbedding'
]
fla/modules/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (1.51 kB). View file
 
fla/modules/__pycache__/activations.cpython-311.pyc ADDED
Binary file (24.1 kB). View file
 
fla/modules/__pycache__/convolution.cpython-311.pyc ADDED
Binary file (22.3 kB). View file
 
fla/modules/__pycache__/feature_map.cpython-311.pyc ADDED
Binary file (20.2 kB). View file
 
fla/modules/__pycache__/fused_bitlinear.cpython-311.pyc ADDED
Binary file (24.4 kB). View file
 
fla/modules/__pycache__/fused_cross_entropy.cpython-311.pyc ADDED
Binary file (16.6 kB). View file
 
fla/modules/__pycache__/fused_kl_div.cpython-311.pyc ADDED
Binary file (12.2 kB). View file
 
fla/modules/__pycache__/fused_linear_cross_entropy.cpython-311.pyc ADDED
Binary file (21.5 kB). View file
 
fla/modules/__pycache__/fused_norm_gate.cpython-311.pyc ADDED
Binary file (35.7 kB). View file
 
fla/modules/__pycache__/l2norm.cpython-311.pyc ADDED
Binary file (7.48 kB). View file
 
fla/modules/__pycache__/layernorm.cpython-311.pyc ADDED
Binary file (43.8 kB). View file
 
fla/modules/__pycache__/layernorm_gated.cpython-311.pyc ADDED
Binary file (24.7 kB). View file
 
fla/modules/__pycache__/mlp.cpython-311.pyc ADDED
Binary file (6.87 kB). View file
 
fla/modules/__pycache__/rotary.cpython-311.pyc ADDED
Binary file (23.8 kB). View file
 
fla/modules/activations.py ADDED
@@ -0,0 +1,471 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) 2023-2025, Tri Dao, Yu Zhang, Songlin Yang.
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import triton
7
+ import triton.language as tl
8
+
9
+ from fla.ops.utils.op import exp, log
10
+ from fla.utils import autocast_custom_bwd, autocast_custom_fwd, get_multiprocessor_count, input_guard
11
+
12
+ sigmoid_fwd_codestring = """
13
+ template <typename T> T sigmoid_fwd(T x) {
14
+ return 1.0f / (1.0f + ::exp(-float(x)));
15
+ }
16
+ """
17
+ sigmoid_bwd_codestring = """
18
+ template <typename T> T sigmoid_bwd(T x, T g) {
19
+ float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
20
+ return float(g) * x_sigmoid * (1.0f - x_sigmoid);
21
+ }
22
+ """
23
+
24
+ sigmoid_fwd_jit_fn = torch.cuda.jiterator._create_jit_fn(sigmoid_fwd_codestring)
25
+ sigmoid_bwd_jit_fn = torch.cuda.jiterator._create_jit_fn(sigmoid_bwd_codestring)
26
+
27
+
28
+ @torch.compiler.disable
29
+ def sigmoid_fwd(x):
30
+ return sigmoid_fwd_jit_fn(x)
31
+
32
+
33
+ @torch.compiler.disable
34
+ def sigmoid_bwd(x, g):
35
+ return sigmoid_bwd_jit_fn(x, g)
36
+
37
+
38
+ class SigmoidFunction(torch.autograd.Function):
39
+
40
+ @staticmethod
41
+ def forward(ctx, x):
42
+ ctx.save_for_backward(x)
43
+ return sigmoid_fwd(x)
44
+
45
+ @staticmethod
46
+ def backward(ctx, dout):
47
+ x, = ctx.saved_tensors
48
+ return sigmoid_bwd(x, dout)
49
+
50
+
51
+ sigmoid = SigmoidFunction.apply
52
+
53
+
54
+ @triton.autotune(
55
+ configs=[
56
+ triton.Config({}, num_warps=num_warps)
57
+ for num_warps in [1, 2, 4, 8, 16, 32]
58
+ ],
59
+ key=['D']
60
+ )
61
+ @triton.jit
62
+ def logsigmoid_fwd_kernel(
63
+ x,
64
+ y,
65
+ temperature,
66
+ T: tl.constexpr,
67
+ D: tl.constexpr,
68
+ B: tl.constexpr
69
+ ):
70
+ i = tl.program_id(0)
71
+ o_i = i * B + tl.arange(0, B)
72
+ m_i = o_i < T
73
+
74
+ b_x = tl.load(x + o_i, mask=m_i, other=0.).to(tl.float32)
75
+ b_m = tl.minimum(0., b_x)
76
+ b_z = 1. + exp(-tl.abs(b_x))
77
+ b_y = (b_m - log(b_z)) / temperature
78
+ tl.store(y + o_i, b_y.to(y.dtype.element_ty), mask=m_i)
79
+
80
+
81
+ @triton.autotune(
82
+ configs=[
83
+ triton.Config({}, num_warps=num_warps)
84
+ for num_warps in [1, 2, 4, 8, 16, 32]
85
+ ],
86
+ key=['D']
87
+ )
88
+ @triton.jit
89
+ def logsigmoid_bwd_kernel(
90
+ x,
91
+ dx,
92
+ dy,
93
+ temperature,
94
+ T: tl.constexpr,
95
+ D: tl.constexpr,
96
+ B: tl.constexpr
97
+ ):
98
+ i = tl.program_id(0)
99
+ o_i = i * B + tl.arange(0, B)
100
+ m_i = o_i < T
101
+
102
+ b_x = tl.load(x + o_i, mask=m_i, other=0.).to(tl.float32)
103
+ b_dy = tl.load(dy + o_i, mask=m_i, other=0.).to(tl.float32)
104
+ b_dx = b_dy * (1. - tl.sigmoid(b_x)) / temperature
105
+ tl.store(dx + o_i, b_dx.to(dx.dtype.element_ty), mask=m_i)
106
+
107
+
108
+ def logsigmoid_fwd(x: torch.Tensor, temperature: float = 1.) -> torch.Tensor:
109
+ T, D = x.numel(), x.shape[-1]
110
+ B = triton.next_power_of_2(triton.cdiv(T, get_multiprocessor_count(x.device.index)))
111
+ y = torch.empty_like(x)
112
+ logsigmoid_fwd_kernel[(triton.cdiv(T, B),)](
113
+ x=x,
114
+ y=y,
115
+ temperature=temperature,
116
+ T=T,
117
+ D=D,
118
+ B=B
119
+ )
120
+ return y
121
+
122
+
123
+ def logsigmoid_bwd(x: torch.Tensor, dy: torch.Tensor, temperature: float = 1.) -> torch.Tensor:
124
+ T, D = x.numel(), x.shape[-1]
125
+ B = triton.next_power_of_2(triton.cdiv(T, get_multiprocessor_count(x.device.index)))
126
+ dx = torch.empty_like(x)
127
+ logsigmoid_bwd_kernel[(triton.cdiv(T, B),)](
128
+ x=x,
129
+ dx=dx,
130
+ dy=dy,
131
+ temperature=temperature,
132
+ T=T,
133
+ D=D,
134
+ B=B
135
+ )
136
+ return dx
137
+
138
+
139
+ class LogSigmoidFunction(torch.autograd.Function):
140
+
141
+ @staticmethod
142
+ @input_guard
143
+ def forward(ctx, x, temperature):
144
+ ctx.save_for_backward(x,)
145
+ ctx.temperature = temperature
146
+ return logsigmoid_fwd(x, temperature)
147
+
148
+ @staticmethod
149
+ @input_guard
150
+ def backward(ctx, dy):
151
+ x, = ctx.saved_tensors
152
+ return logsigmoid_bwd(x, dy, ctx.temperature), None
153
+
154
+
155
+ def logsigmoid(x: torch.Tensor, temperature: float = 1.) -> torch.Tensor:
156
+ return LogSigmoidFunction.apply(x, temperature)
157
+
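For reference, a minimal sketch (illustrative only, not part of the committed file): the kernels above evaluate the numerically stable form min(0, x) - log(1 + exp(-|x|)) scaled by 1/temperature, so on a CUDA device the wrapper should agree with plain PyTorch:

import torch
import torch.nn.functional as F

x = torch.randn(1024, 64, device='cuda')
out = logsigmoid(x, temperature=2.0)       # Triton forward defined above
ref = F.logsigmoid(x) / 2.0                # unfused reference
assert torch.allclose(out, ref, atol=1e-5)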
158
+
159
+ swish_fwd_codestring = """
160
+ template <typename T> T swish_fwd(T x) {
161
+ float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
162
+ return float(x) * x_sigmoid;
163
+ }
164
+ """
165
+ swish_bwd_codestring = """
166
+ template <typename T> T swish_bwd(T x, T g) {
167
+ float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
168
+ return float(g) * x_sigmoid * (1.0f - float(x) * x_sigmoid + float(x));
169
+ }
170
+ """
171
+
172
+ swish_fwd_jit_fn = torch.cuda.jiterator._create_jit_fn(swish_fwd_codestring)
173
+ swish_bwd_jit_fn = torch.cuda.jiterator._create_jit_fn(swish_bwd_codestring)
174
+
175
+
176
+ @torch.compiler.disable
177
+ def swish_fwd(x):
178
+ return swish_fwd_jit_fn(x)
179
+
180
+
181
+ @torch.compiler.disable
182
+ def swish_bwd(x, g):
183
+ return swish_bwd_jit_fn(x, g)
184
+
185
+
186
+ class SwishFunction(torch.autograd.Function):
187
+
188
+ @staticmethod
189
+ def forward(ctx, x):
190
+ ctx.save_for_backward(x)
191
+ return swish_fwd(x)
192
+
193
+ @staticmethod
194
+ def backward(ctx, dout):
195
+ x, = ctx.saved_tensors
196
+ return swish_bwd(x, dout)
197
+
198
+
199
+ swish = SwishFunction.apply
200
+
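The hand-written swish backward relies on the identity d/dx[x*sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x))). A quick, illustrative check against autograd's silu (assuming a CUDA device, since the jiterator kernels are CUDA-only):

import torch
import torch.nn.functional as F

x = torch.randn(32, 16, device='cuda', requires_grad=True)
g = torch.randn(32, 16, device='cuda')
swish(x).backward(g)                         # custom jiterator backward
x_ref = x.detach().clone().requires_grad_()
F.silu(x_ref).backward(g)                    # autograd reference
assert torch.allclose(x.grad, x_ref.grad, atol=1e-5)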
201
+ # 1/sqrt(2*pi)-> 0.3989423
202
+ # 1/sqrt(2) -> 0.70710678
203
+ # sqrt(2/pi) -> 0.79788456
204
+
205
+
206
+ # this function is tanh approximation of gelu
207
+ # actual gelu is:
208
+ # x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
209
+ @torch.compile
210
+ def bias_gelu(y, bias):
211
+ x = bias + y
212
+ return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=y.dtype)
213
+
214
+
215
+ # gradient of tanh approximation of gelu
216
+ # gradient of actual gelu is:
217
+ # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
218
+ @torch.compile
219
+ def bias_gelu_bwd(g, y, bias):
220
+ """Assume that y has shape (B, D) and bias has shape (D)"""
221
+ x = bias + y
222
+ tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
223
+ # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
224
+ ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
225
+ 1 + tanh_out
226
+ )
227
+ grad_y = ff * g
228
+ return grad_y.to(dtype=y.dtype), grad_y.sum(dim=(0), dtype=bias.dtype)
229
+
230
+
231
+ class GeLUFunction(torch.autograd.Function):
232
+
233
+ @staticmethod
234
+ # bias is an optional argument
235
+ def forward(ctx, input, bias):
236
+ ctx.save_for_backward(input, bias)
237
+ return bias_gelu(input, bias)
238
+
239
+ @staticmethod
240
+ def backward(ctx, grad_output):
241
+ input, bias = ctx.saved_tensors
242
+ tmp = bias_gelu_bwd(grad_output, input, bias)
243
+ return tmp  # bias_gelu_bwd already returns (grad_input, grad_bias)
244
+
245
+
246
+ bias_gelu_impl = GeLUFunction.apply
247
+
248
+
249
+ # this function is tanh approximation of gelu
250
+ # actual gelu is:
251
+ # x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
252
+ @torch.compile
253
+ def gelu_fwd(x):
254
+ return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=x.dtype)
255
+
256
+
257
+ # gradient of tanh approximation of gelu
258
+ # gradient of actual gelu is:
259
+ # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
260
+ @torch.compile
261
+ def gelu_bwd(g, x):
262
+ tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
263
+ # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
264
+ ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
265
+ 1 + tanh_out
266
+ )
267
+ return (ff * g).to(dtype=x.dtype)
268
+
269
+
270
+ class FastGeLUFunction(torch.autograd.Function):
271
+ @staticmethod
272
+ # bias is an optional argument
273
+ def forward(ctx, input):
274
+ ctx.save_for_backward(input)
275
+ return gelu_fwd(input)
276
+
277
+ @staticmethod
278
+ def backward(ctx, grad_output):
279
+ (input,) = ctx.saved_tensors
280
+ tmp = gelu_bwd(grad_output, input)
281
+ return tmp
282
+
283
+
284
+ fast_gelu_impl = FastGeLUFunction.apply
285
+
286
+
287
+ @torch.compile
288
+ def relu_bwd(g, x):
289
+ return torch.where(x >= 0, g, 0.0).to(dtype=x.dtype)
290
+
291
+
292
+ @torch.compile
293
+ def sqrelu_fwd(x):
294
+ r = F.relu(x.float())
295
+ return (r * r).to(dtype=x.dtype)
296
+
297
+
298
+ @torch.compile
299
+ def sqrelu_bwd(g, x):
300
+ return (2.0 * g * F.relu(x.float())).to(dtype=x.dtype)
301
+
302
+
303
+ class SquaredReLUFunction(torch.autograd.Function):
304
+
305
+ @staticmethod
306
+ def forward(ctx, input):
307
+ ctx.save_for_backward(input)
308
+ return sqrelu_fwd(input)
309
+
310
+ @staticmethod
311
+ def backward(ctx, grad_output):
312
+ input, = ctx.saved_tensors
313
+ return sqrelu_bwd(grad_output, input)
314
+
315
+
316
+ sqrelu = SquaredReLUFunction.apply
317
+
318
+
319
+ swiglu_fwd_codestring = """
320
+ template <typename T> T swiglu_fwd(T x, T y) {
321
+ return float(x) * float(y) / (1.0f + ::exp(-float(x)));
322
+ }
323
+ """
324
+ swiglu_bwd_codestring = """
325
+ template <typename T> T swiglu_bwd(T x, T y, T g, T& dx, T& dy) {
326
+ float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
327
+ dx = x_sigmoid * (1 + float(x) * (1.0f - x_sigmoid)) * float(g) * float(y);
328
+ dy = float(x) * x_sigmoid * float(g);
329
+ }
330
+ """
331
+
332
+ swiglu_fwdbwd_codestring = """
333
+ template <typename T> T swiglu_fwdbwd(T x, T y, T g, T& dx, T& dy, T& z) {
334
+ float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
335
+ float x_swish = float(x) * x_sigmoid;
336
+ dx = x_sigmoid * (1 + float(x) * (1.0f - x_sigmoid)) * float(g) * float(y);
337
+ dy = x_swish * float(g);
338
+ z = x_swish * float(y);
339
+ }
340
+ """
341
+
342
+
343
+ swiglu_fwd_jit_fn = torch.cuda.jiterator._create_jit_fn(swiglu_fwd_codestring)
344
+ swiglu_bwd_jit_fn = torch.cuda.jiterator._create_multi_output_jit_fn(swiglu_bwd_codestring, num_outputs=2)
345
+ swiglu_fwdbwd_jit_fn = torch.cuda.jiterator._create_multi_output_jit_fn(swiglu_fwdbwd_codestring, num_outputs=3)
346
+
347
+
348
+ @torch.compiler.disable
349
+ def swiglu_fwd(x, y):
350
+ return swiglu_fwd_jit_fn(x, y)
351
+
352
+
353
+ @torch.compiler.disable
354
+ def swiglu_bwd(x, y, g):
355
+ return swiglu_bwd_jit_fn(x, y, g)
356
+
357
+
358
+ @torch.compiler.disable
359
+ def swiglu_fwdbwd(x, y, g):
360
+ return swiglu_fwdbwd_jit_fn(x, y, g)
361
+
362
+
363
+ @torch.compile
364
+ def swiglu_fwd_torch(x, y):
365
+ return (F.silu(x.float()) * y).to(x.dtype)
366
+
367
+
368
+ @torch.compile
369
+ def swiglu_bwd_torch(x, y, g):
370
+ dtype = x.dtype
371
+ x, y, g = x.float(), y.float(), g.float()
372
+ x_sigmoid = x.sigmoid()
373
+ x_swish = x * x_sigmoid
374
+ dx = x_sigmoid * (1 + x * (1.0 - x_sigmoid)) * g * y
375
+ dy = x_swish * g
376
+ return dx.to(dtype), dy.to(dtype)
377
+
378
+
379
+ @torch.compile
380
+ def swiglu_fwdbwd_torch(x, y, g):
381
+ dtype = x.dtype
382
+ x, y, g = x.float(), y.float(), g.float()
383
+ x_sigmoid = x.sigmoid()
384
+ x_swish = x * x_sigmoid
385
+ dx = x_sigmoid * (1 + x * (1.0 - x_sigmoid)) * g * y
386
+ dy = x_swish * g
387
+ z = x_swish * y
388
+ return dx.to(dtype), dy.to(dtype), z.to(dtype)
389
+
390
+
391
+ class SwiGLUFunction(torch.autograd.Function):
392
+ r"""
393
+ Swish-Gated Linear Unit (SwiGLU) function.
394
+
395
+ .. math::
396
+ \text{SwiGLU}(x, y) = swish(x) * y = \frac{x}{1 + \exp(-x)} * y
397
+ """
398
+
399
+ @staticmethod
400
+ def forward(ctx, x, y):
401
+ ctx.save_for_backward(x, y)
402
+ if torch.compiler.is_compiling() or isinstance(x, torch.distributed.tensor.DTensor):
403
+ return swiglu_fwd_torch(x, y)
404
+ else:
405
+ return swiglu_fwd(x, y)
406
+
407
+ @staticmethod
408
+ def backward(ctx, dout):
409
+ x, y = ctx.saved_tensors
410
+ if torch.compiler.is_compiling() or isinstance(x, torch.distributed.tensor.DTensor):
411
+ return swiglu_bwd_torch(x, y, dout)
412
+ else:
413
+ return swiglu_bwd(x, y, dout)
414
+
415
+
416
+ class SwiGLULinearFunction(torch.autograd.Function):
417
+ r"""
418
+ Swish-Gated Linear Unit (SwiGLU) function followed by a linear transformation.
419
+
420
+ .. math::
421
+ \text{SwiGLULinear}(x, y, W, b) = (swish(x) * y) W + b
422
+
423
+ This simple wrap discards the intermediate results of SwiGLU(x, y) to save memory.
424
+ """
425
+
426
+ @staticmethod
427
+ @autocast_custom_fwd
428
+ def forward(ctx, x, y, weight, bias):
429
+ with torch.no_grad():
430
+ if torch.compiler.is_compiling() or isinstance(x, torch.distributed.tensor.DTensor):
431
+ z = swiglu_fwd_torch(x, y)
432
+ else:
433
+ z = swiglu_fwd(x, y)
434
+ out = F.linear(z, weight, bias)
435
+ # We don't store z, will be recomputed in the backward pass to save memory
436
+ ctx.save_for_backward(x, y, weight)
437
+ ctx.linear_bias_is_none = bias is None
438
+ return out
439
+
440
+ @staticmethod
441
+ @autocast_custom_bwd
442
+ def backward(ctx, dout, *args):
443
+ x, y, weight = ctx.saved_tensors
444
+ dout = dout.reshape(-1, dout.shape[-1])
445
+ dz = F.linear(dout, weight.t()).view_as(x)
446
+ with torch.no_grad():
447
+ if torch.compiler.is_compiling() or isinstance(x, torch.distributed.tensor.DTensor):
448
+ dx, dy, z = swiglu_fwdbwd_torch(x, y, dz)
449
+ else:
450
+ dx, dy, z = swiglu_fwdbwd(x, y, dz)
451
+ dlinear_weight = torch.einsum("bo,bi->oi", dout, z.reshape(-1, z.shape[-1]))
452
+ dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
453
+ return dx, dy, dlinear_weight, dlinear_bias
454
+
455
+
456
+ swiglu = SwiGLUFunction.apply
457
+
458
+
459
+ swiglu_linear = SwiGLULinearFunction.apply
460
+
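SwiGLULinearFunction trades compute for memory by recomputing swish(x) * y in the backward pass; numerically it matches the unfused composition. An illustrative sketch (assumes CUDA tensors):

import torch
import torch.nn.functional as F

x = torch.randn(8, 128, device='cuda')
y = torch.randn(8, 128, device='cuda')
w = torch.randn(256, 128, device='cuda')
b = torch.randn(256, device='cuda')
out = swiglu_linear(x, y, w, b)            # fused, memory-saving path
ref = F.linear(F.silu(x) * y, w, b)        # unfused reference
assert torch.allclose(out, ref, atol=1e-4)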
461
+
462
+ ACT2FN = {
463
+ 'relu': F.relu,
464
+ 'sigmoid': sigmoid,
465
+ 'logsigmoid': logsigmoid,
466
+ 'silu': swish,
467
+ 'swish': swish,
468
+ 'sqrelu': sqrelu,
469
+ 'gelu': fast_gelu_impl,
470
+ 'bias_gelu': bias_gelu_impl,
471
+ }
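ACT2FN mirrors the usual transformers-style activation registry but maps the names to the fused/compiled variants defined above, e.g. (illustrative usage only):

import torch

act_fn = ACT2FN['swish']                   # fused SiLU/Swish from this module
h = act_fn(torch.randn(2, 16, device='cuda'))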
fla/modules/convolution.py ADDED
@@ -0,0 +1,434 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # from https://github.com/HazyResearch/zoology/blob/main/zoology/mixers/convolution.py
4
+
5
+ import math
6
+ import warnings
7
+ from typing import Optional, Tuple
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ import triton
13
+ import triton.language as tl
14
+ from einops import rearrange
15
+
16
+ from fla.modules.activations import ACT2FN
17
+ from fla.ops.common.utils import prepare_position_ids, prepare_sequence_ids
18
+ from fla.utils import checkpoint, input_guard
19
+
20
+ try:
21
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
22
+ except ImportError:
23
+ causal_conv1d_fn = None
24
+ causal_conv1d_update = None
25
+
26
+
27
+ def fft_conv(u, k, dropout_mask, gelu=True, k_rev=None):
28
+ seqlen = u.shape[-1]
29
+ fft_size = 2 * seqlen
30
+ k_f = torch.fft.rfft(k, n=fft_size) / fft_size
31
+ if k_rev is not None:
32
+ k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
33
+ k_f = k_f + k_rev_f.conj()
34
+ u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
35
+
36
+ if len(u.shape) > 3:
37
+ k_f = k_f.unsqueeze(1)
38
+ y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
39
+
40
+ out = y + u
41
+ if gelu:
42
+ out = F.gelu(out)
43
+ if dropout_mask is not None:
44
+ return (out * rearrange(dropout_mask, "b H -> b H 1")).to(dtype=u.dtype)
45
+ else:
46
+ return out.to(dtype=u.dtype)
47
+
48
+
49
+ @checkpoint
50
+ def proj_then_conv1d(
51
+ x: torch.Tensor,
52
+ proj_weight: torch.Tensor,
53
+ conv1d_weight: torch.Tensor,
54
+ conv1d_bias: Optional[torch.Tensor] = None,
55
+ cache: Optional[torch.Tensor] = None
56
+ ) -> torch.Tensor:
57
+ # We do matmul and transpose BLH -> HBL at the same time
58
+ x = rearrange(proj_weight @ rearrange(x, "b t d -> d (b t)"), "d (b t) -> b d t", t=x.shape[-2])
59
+
60
+ if causal_conv1d_fn is None:
61
+ raise ImportError("`causal_conv1d_fn` is not available. Please install `causal-conv1d` first.")
62
+ if cache is None:
63
+ x = causal_conv1d_fn(
64
+ x=x,
65
+ weight=rearrange(conv1d_weight, "d 1 w -> d w"),
66
+ bias=conv1d_bias,
67
+ activation="silu",
68
+ ).transpose(1, 2)
69
+ else:
70
+ assert x.shape[-1] == 1, "Only support decoding with 1 token at a time for now"
71
+ x = x.squeeze(-1)
72
+ x = causal_conv1d_update(
73
+ x=x,
74
+ weight=rearrange(conv1d_weight, "d 1 w -> d w"),
75
+ bias=conv1d_bias,
76
+ cache=cache,
77
+ activation="silu",
78
+ )
79
+ return x
80
+
81
+
82
+ @triton.jit
83
+ def causal_conv1d_varlen_states_fwd_kernel(
84
+ x,
85
+ cache,
86
+ offsets,
87
+ D,
88
+ W,
89
+ BD: tl.constexpr,
90
+ BW: tl.constexpr
91
+ ):
92
+ i_d, i_w, i_n = tl.program_id(0), tl.program_id(1), tl.program_id(2)
93
+ eos = tl.load(offsets + i_n + 1)
94
+ bos = tl.maximum(tl.load(offsets + i_n), eos - W)
95
+ o_t = eos - (i_w + 1) * BW + tl.arange(0, BW)
96
+ o_d = i_d * BD + tl.arange(0, BD)
97
+ o_w = W - (i_w + 1) * BW + tl.arange(0, BW)
98
+
99
+ b_x = tl.load(x + o_t * D + o_d[:, None], mask=(o_t >= bos) & (o_d[:, None] < D), other=0)
100
+ tl.store(cache + i_n * D*W + o_d[:, None] * W + o_w, b_x, mask=(o_d[:, None] < D) & (o_w >= 0))
101
+
102
+
103
+ @input_guard
104
+ def causal_conv1d_varlen_states_fwd(
105
+ x: torch.Tensor,
106
+ cache: torch.Tensor,
107
+ cu_seqlens: torch.Tensor,
108
+ state_len: int
109
+ ) -> torch.Tensor:
110
+ N, D, W = len(cu_seqlens) - 1, x.shape[-1], state_len
111
+ cache = torch.empty(N, D, W, dtype=x.dtype, device=x.device) if cache is None else cache
112
+ BD = min(triton.next_power_of_2(D), 256)
113
+ BW = min(triton.next_power_of_2(state_len), 16)
114
+ grid = (triton.cdiv(D, BD), triton.cdiv(W, BW), N)
115
+ with torch.cuda.device(x.device.index):
116
+ causal_conv1d_varlen_states_fwd_kernel[grid](
117
+ x=x,
118
+ cache=cache,
119
+ offsets=cu_seqlens,
120
+ D=D,
121
+ W=W,
122
+ BW=BW,
123
+ BD=BD
124
+ )
125
+ return cache
126
+
127
+
128
+ class ShortConvolution(nn.Conv1d):
129
+ """
130
+ Simple wrapper around `nn.Conv1d` that accepts inputs with the feature dimension last, i.e. of shape `[B, T, D]`.
131
+ """
132
+
133
+ def __init__(
134
+ self,
135
+ hidden_size: int,
136
+ kernel_size: int,
137
+ bias: bool = False,
138
+ activation: Optional[str] = 'silu',
139
+ use_fast_conv1d: Optional[bool] = True,
140
+ device: Optional[torch.device] = None,
141
+ dtype: Optional[torch.dtype] = None,
142
+ ):
143
+ super().__init__(
144
+ in_channels=hidden_size,
145
+ out_channels=hidden_size,
146
+ kernel_size=kernel_size,
147
+ groups=hidden_size,
148
+ bias=bias,
149
+ padding=kernel_size - 1,
150
+ device=device,
151
+ dtype=dtype,
152
+ )
153
+
154
+ self.hidden_size = hidden_size
155
+ self.activation = None
156
+ if activation is not None:
157
+ assert activation in ['silu', 'swish'], f"Activation `{activation}` not supported yet."
158
+ self.activation = activation
159
+
160
+ if causal_conv1d_fn is None:
161
+ if use_fast_conv1d:
162
+ raise RuntimeError(
163
+ "Please either install `causal-conv1d>=1.4.0` to enable fast causal short convolution CUDA kernel "
164
+ "or set `use_fast_conv1d` to False"
165
+ )
166
+ else:
167
+ warnings.warn(
168
+ "The naive Pytorch verison is very slow in practice, "
169
+ "please run `pip install causal-conv1d>=1.4.0` to install fast causal short convolution CUDA kernel",
170
+ category=ImportWarning
171
+ )
172
+ self.use_fast_conv1d = use_fast_conv1d
173
+
174
+ def extra_repr(self):
175
+ s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
176
+ ', stride={stride}')
177
+ if self.padding != (0,) * len(self.padding):
178
+ s += ', padding={padding}'
179
+ if self.dilation != (1,) * len(self.dilation):
180
+ s += ', dilation={dilation}'
181
+ if self.output_padding != (0,) * len(self.output_padding):
182
+ s += ', output_padding={output_padding}'
183
+ if self.groups != 1:
184
+ s += ', groups={groups}'
185
+ if self.bias is None:
186
+ s += ', bias=False'
187
+ if self.padding_mode != 'zeros':
188
+ s += ', padding_mode={padding_mode}'
189
+ if self.activation is not None:
190
+ s += ', activation={activation}'
191
+ if not self.use_fast_conv1d:
192
+ s += ', use_fast_conv1d={use_fast_conv1d}'
193
+ return s.format(**self.__dict__)
194
+
195
+ def forward(
196
+ self,
197
+ x: torch.Tensor,
198
+ mask: Optional[torch.Tensor] = None,
199
+ cache: Optional[torch.Tensor] = None,
200
+ output_final_state: bool = False,
201
+ cu_seqlens: Optional[torch.LongTensor] = None,
202
+ **kwargs,
203
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
204
+ """
205
+ Args:
206
+ x (`torch.Tensor`):
207
+ Tensor of shape `[B, T, D]`.
208
+ If `seq_idx` is provided, `B` must be 1.
209
+ mask (`Optional[torch.Tensor]`):
210
+ Attention mask dealing with padded positions.
211
+ cache (`Optional[torch.Tensor]`):
212
+ Previous cache tensor of shape `[N, D, W]`, where `W` is the kernel size.
213
+ If provided, the cache is updated **inplace**.
214
+ output_final_state (Optional[bool]):
215
+ Whether to output the final state of shape `[N, D, W]`. Default: `False`.
216
+ cu_seqlens (Optional[torch.LongTensor]):
217
+ Cumulative sequence lengths for each batch. Used for varlen. Default: `None`.
218
+ Shape: [B+1]
219
+
220
+ Returns:
221
+ Tensor of shape `[B, T, D]`.
222
+ """
223
+
224
+ B, T, D, W = *x.shape, self.kernel_size[0]
225
+ N = B if cu_seqlens is None else len(cu_seqlens) - 1
226
+ if mask is not None:
227
+ if cu_seqlens is not None:
228
+ raise ValueError("`mask` and `cu_seqlens` cannot be provided at the same time")
229
+ x = x.mul_(mask.unsqueeze(-1))
230
+ if output_final_state and cache is None:
231
+ cache = x.new_zeros(N, D, W)
232
+ # during the decoding phase, we assume the batch is composed of sequences of length 1
233
+ if cache is not None and B * T == N:
234
+ return self.step(x, cache, cu_seqlens)
235
+
236
+ if cache is not None:
237
+ if cu_seqlens is not None:
238
+ cache = causal_conv1d_varlen_states_fwd(x, cache, cu_seqlens, W)
239
+ else:
240
+ cache[:, :, -min(W, T):].copy_(rearrange(x[..., -min(W, T):, :], 'n w d -> n d w'))
241
+
242
+ x = rearrange(x, 'b t d -> b d t')
243
+ if self.use_fast_conv1d:
244
+ # Sequence index for each token. Used for varlen.
245
+ # Suppose a batch consists of two sequences with lengths 3 and 4,
246
+ # seq_idx=[0, 0, 0, 1, 1, 1, 1] for this batch.
247
+ # NOTE: No need to provide this arg if `cu_seqlens` is passed.
248
+ # This arg is just for BC, and will be removed in the future.
249
+ # [B, T]
250
+ seq_idx = kwargs.get('seq_idx', None)
251
+ if cu_seqlens is not None and seq_idx is None:
252
+ seq_idx = prepare_sequence_ids(prepare_position_ids(cu_seqlens)).to(torch.int32).unsqueeze(0)
253
+ x = causal_conv1d_fn(
254
+ x=x,
255
+ weight=rearrange(self.weight, "d 1 w -> d w"),
256
+ bias=self.bias,
257
+ activation=self.activation,
258
+ seq_idx=seq_idx,
259
+ )
260
+ else:
261
+ if cu_seqlens is not None:
262
+ raise ValueError("`cu_seqlens` is not supported for the naive Pytorch version")
263
+ x = self._conv_forward(x, self.weight, self.bias)[..., :x.shape[-1]]
264
+ if self.activation is not None:
265
+ x = ACT2FN[self.activation](x)
266
+ return rearrange(x, "b d t -> b t d"), cache
267
+
268
+ def step(
269
+ self,
270
+ x: torch.Tensor,
271
+ cache: torch.Tensor,
272
+ cu_seqlens: Optional[torch.LongTensor] = None
273
+ ):
274
+ shape = x.shape
275
+ x = x.squeeze(0) if cu_seqlens is not None else x.squeeze(1)
276
+ if self.use_fast_conv1d:
277
+ x = causal_conv1d_update(
278
+ x=x,
279
+ conv_state=cache,
280
+ weight=rearrange(self.weight, "d 1 w -> d w"),
281
+ bias=self.bias,
282
+ activation=self.activation,
283
+ )
284
+ else:
285
+ dtype = x.dtype
286
+ # we follow the fast mode that updates the cache in-place
287
+ cache.copy_(cache.roll(shifts=-1, dims=-1))
288
+ cache[:, :, -1] = x
289
+ x = torch.sum(cache * rearrange(self.weight, "d 1 w -> d w"), dim=-1)
290
+ if self.bias is not None:
291
+ x = x + self.bias
292
+ if self.activation is not None:
293
+ x = ACT2FN[self.activation](x).to(dtype=dtype)
294
+ return x.view(shape), cache
295
+
296
+ @property
297
+ def state_size(self) -> int:
298
+ return self.hidden_size * self.kernel_size[0]
299
+
300
+
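A minimal usage sketch for ShortConvolution (illustrative only; it assumes either `causal-conv1d` or the slower fallback path is available): a prefill call that also returns the rolling `[N, D, W]` state, followed by a one-token decode step that updates that state in place.

import torch

conv = ShortConvolution(hidden_size=512, kernel_size=4).cuda()
x = torch.randn(2, 128, 512, device='cuda')
y, cache = conv(x, output_final_state=True)     # prefill, keep conv state

x_t = torch.randn(2, 1, 512, device='cuda')
y_t, cache = conv(x_t, cache=cache)             # single-token decode step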
301
+ class LongConvolution(nn.Module):
302
+ """
303
+ LongConvolution applies a convolution operation on the input tensor using a fixed
304
+ filter of length max_len.
305
+ The filter is learned during training and is applied using FFT convolution.
306
+ Args:
307
+ hidden_size (int): The number of expected features in the input and output.
308
+ max_len (int): The maximum sequence length.
309
+ Returns:
310
+ y: [batch_size, seq_len, hidden_size] tensor
311
+ """
312
+
313
+ def __init__(
314
+ self,
315
+ hidden_size: int,
316
+ max_len: int,
317
+ **kwargs,
318
+ ):
319
+ """
320
+ Initializes the LongConvolution module.
321
+ Args:
322
+ hidden_size (int): The number of expected features in the input and output.
323
+ max_len (int): The maximum sequence length.
324
+ """
325
+ super().__init__()
326
+ self.hidden_size = hidden_size
327
+ self.filter = nn.Parameter(torch.randn(self.hidden_size, max_len), requires_grad=True)
328
+
329
+ def forward(self, x: torch.Tensor, *args, **kwargs):
330
+ """
331
+ Applies the LongConvolution operation on the input tensor.
332
+ Args:
333
+ x: [batch_size, seq_len, hidden_size] tensor
334
+ Returns:
335
+ y: [batch_size, seq_len, hidden_size] tensor
336
+ """
337
+ x = x.transpose(1, 2)
338
+ y = fft_conv(x, self.filter, dropout_mask=None, gelu=False)
339
+ y = y.transpose(1, 2)
340
+ return y.to(dtype=x.dtype)
341
+
342
+
343
+ class PositionalEmbedding(nn.Module):
344
+ def __init__(self, emb_dim: int, seq_len: int, **kwargs):
345
+ """Complex exponential positional embeddings for implicit long convolution filters."""
346
+ super().__init__()
347
+
348
+ self.seq_len = seq_len
349
+ # The time embedding fed to the filters is normalized so that t_f = 1
350
+ t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
351
+
352
+ if emb_dim > 1:
353
+ bands = (emb_dim - 1) // 2
354
+ # To compute the right embeddings we use the "proper" linspace
355
+ t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
356
+ w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
357
+
358
+ f = torch.linspace(1e-4, bands - 1, bands)[None, None]
359
+ z = torch.exp(-1j * f * w)
360
+ z = torch.cat([t, z.real, z.imag], dim=-1)
361
+ self.z = nn.Parameter(z, requires_grad=False)
362
+
363
+ def forward(self, L):
364
+ return self.z[:, :L]
365
+
366
+
367
+ class ImplicitLongConvolution(nn.Module):
368
+ """
369
+ Long convolution with implicit filter parameterized by an MLP.
370
+
371
+ Args:
372
+ hidden_size (int):
373
+ The number of expected features in the input and output.
374
+ max_len (int):
375
+ The maximum sequence length.
376
+ d_emb (Optional[int]):
377
+ The dimension of the positional embeddings. Must be odd and greater or equal to 3 (time, sine and cosine).
378
+ Defaults to 3.
379
+ d_hidden (Optional[int]):
380
+ The number of features in the hidden layer of the MLP. Defaults to 16.
381
+
382
+ Attributes:
383
+ pos_emb (`PositionalEmbedding`): The positional embedding layer.
384
+ mlp (`nn.Sequential`): The MLP that parameterizes the implicit filter.
385
+
386
+ """
387
+
388
+ def __init__(
389
+ self,
390
+ hidden_size: int,
391
+ max_len: int,
392
+ d_emb: int = 3,
393
+ d_hidden: int = 16,
394
+ **kwargs,
395
+ ):
396
+ """
397
+ Long convolution with implicit filter parameterized by an MLP.
398
+
399
+
400
+ """
401
+ super().__init__()
402
+ self.hidden_size = hidden_size
403
+ self.d_emb = d_emb
404
+
405
+ assert (
406
+ d_emb % 2 != 0 and d_emb >= 3
407
+ ), "d_emb must be odd and greater or equal to 3 (time, sine and cosine)"
408
+ self.pos_emb = PositionalEmbedding(d_emb, max_len)
409
+
410
+ # final linear layer
411
+ self.mlp = nn.Sequential(
412
+ nn.Linear(d_emb, d_hidden),
413
+ torch.nn.ReLU(),
414
+ nn.Linear(d_hidden, hidden_size),
415
+ )
416
+
417
+ def filter(self, seq_len: int, *args, **kwargs):
418
+ k = self.mlp(self.pos_emb(seq_len))
419
+
420
+ return k.transpose(1, 2)
421
+
422
+ def forward(self, x: torch.Tensor, *args, **kwargs):
423
+ """
424
+ Args:
425
+ x: [batch_size, seq_len, hidden_size] tensor
426
+ Returns:
427
+ y: [batch_size, seq_len, hidden_size] tensor
428
+ """
429
+ x = x.transpose(1, 2)
430
+ k = self.filter(x.shape[-1])
431
+ y = fft_conv(x, k, dropout_mask=None, gelu=False)
432
+
433
+ y = y.transpose(1, 2)
434
+ return y.to(dtype=x.dtype)
fla/modules/feature_map.py ADDED
@@ -0,0 +1,300 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from __future__ import annotations
4
+
5
+ import math
6
+ from typing import Optional
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from torch import nn
11
+
12
+ from fla.modules.activations import fast_gelu_impl, sigmoid, sqrelu, swish
13
+ from fla.modules.layernorm import layer_norm
14
+ from fla.utils import checkpoint
15
+
16
+
17
+ @checkpoint
18
+ def flatten_diag_outer_product(x, y):
19
+ z = torch.einsum("...i,...j->...ij", x, y)
20
+ N = z.size(-1)
21
+ indices = torch.triu_indices(N, N)
22
+ return z[..., indices[0], indices[1]]
23
+
24
+
25
+ @checkpoint
26
+ def flatten_diag_outer_product_off1(x, y):
27
+ z = torch.einsum("...i,...j->...ij", x, y)
28
+ N = z.size(-1)
29
+ indices = torch.triu_indices(N, N, 1)
30
+ indices2 = torch.arange(0, N)
31
+ return z[..., indices[0], indices[1]], z[..., indices2, indices2]
32
+
33
+
34
+ def is_power_of_2(n):
35
+ return (n & (n - 1) == 0) and n != 0
36
+
37
+
38
+ class HedgehogFeatureMap(nn.Module):
39
+
40
+ r"""
41
+ Hedgehog feature map as introduced in
42
+ `The Hedgehog & the Porcupine: Expressive Linear Attentions with Softmax Mimicry <https://arxiv.org/abs/2402.04347>`_
43
+ """
44
+
45
+ def __init__(
46
+ self,
47
+ head_dim: int
48
+ ) -> HedgehogFeatureMap:
49
+ super().__init__()
50
+ # Trainable map
51
+ self.layer = nn.Linear(head_dim, head_dim)
52
+ self.init_weights_()
53
+
54
+ def init_weights_(self):
55
+ """Initialize trainable map as identity"""
56
+ with torch.no_grad():
57
+ identity = torch.eye(*self.layer.weight.shape[-2:], dtype=torch.float)
58
+ self.layer.weight.copy_(identity.to(self.layer.weight))
59
+ nn.init.zeros_(self.layer.bias)
60
+
61
+ def forward(self, x: torch.Tensor):
62
+ x = self.layer(x) # shape b, h, l, d
63
+ return torch.cat([2*x, -2*x], dim=-1).softmax(-1)
64
+
65
+
66
+ class T2RFeatureMap(nn.Module):
67
+
68
+ r"""
69
+ Simple linear mapping feature map as in
70
+ `Finetuning Pretrained Transformers into RNNs <https://arxiv.org/abs/2103.13076>`_
71
+ """
72
+
73
+ def __init__(
74
+ self,
75
+ head_dim: int,
76
+ dot_dim: int = None,
77
+ bias: Optional[bool] = False
78
+ ) -> T2RFeatureMap:
79
+ super().__init__()
80
+ # Trainable map
81
+ if dot_dim is None:
82
+ dot_dim = head_dim
83
+
84
+ self.head_dim = head_dim
85
+ self.dot_dim = dot_dim
86
+ self.bias = bias
87
+
88
+ self.layer = nn.Linear(head_dim, dot_dim, bias=bias)
89
+
90
+ def __repr__(self) -> str:
91
+ return f"{self.__class__.__name__}(head_dim={self.head_dim}, dot_dim={self.dot_dim}, bias={self.bias})"
92
+
93
+ def forward(self, x: torch.Tensor):
94
+ return self.layer(x).relu()
95
+
96
+
97
+ class DPFPFeatureMap(nn.Module):
98
+
99
+ r"""
100
+ Deterministic Parameter-Free Projection (DPFP) feature map in
101
+ `Linear Transformers Are Secretly Fast Weight Programmers <https://arxiv.org/abs/2102.11174>`_
102
+ """
103
+
104
+ def __init__(
105
+ self,
106
+ head_dim: int,
107
+ nu: int = 4
108
+ ) -> DPFPFeatureMap:
109
+ super().__init__()
110
+ self.nu = nu
111
+
112
+ def forward(self, x: torch.Tensor):
113
+ x = torch.cat([x.relu(), -x.relu()], dim=-1)
114
+ x_rolled = torch.cat([x.roll(shifts=j, dims=-1) for j in range(1, self.nu+1)], dim=-1)
115
+ x_repeat = torch.cat([x] * self.nu, dim=-1)
116
+ return x_repeat * x_rolled
117
+
118
+
119
+ class HadamardFeatureMap(nn.Module):
120
+ def __init__(
121
+ self,
122
+ head_dim: int
123
+ ) -> HadamardFeatureMap:
124
+ super().__init__()
125
+ # Trainable map
126
+ self.layer1 = nn.Linear(head_dim, head_dim)
127
+ self.layer2 = nn.Linear(head_dim, head_dim)
128
+
129
+ def forward(self, x: torch.Tensor):
130
+ return self.layer1(x) * self.layer2(x)
131
+
132
+
133
+ class LearnableOuterProductFeatureMap(nn.Module):
134
+ def __init__(
135
+ self,
136
+ head_dim: int,
137
+ feature_dim: int
138
+ ) -> LearnableOuterProductFeatureMap:
139
+ super().__init__()
140
+ # Trainable map
141
+ self.layer1 = nn.Linear(head_dim, feature_dim, bias=False)
142
+ self.layer2 = nn.Linear(head_dim, feature_dim, bias=False)
143
+ self.normalizer = feature_dim ** -0.5
144
+
145
+ def forward(self, x: torch.Tensor):
146
+ return flatten_diag_outer_product(self.layer1(x), self.layer2(x))
147
+
148
+
149
+ class LearnablePolySketchNonNegativeFeatureMap(nn.Module):
150
+
151
+ def __init__(
152
+ self,
153
+ head_dim: int,
154
+ sketch_size: Optional[int] = None,
155
+ degree: Optional[int] = 2
156
+ ) -> LearnablePolySketchNonNegativeFeatureMap:
157
+ super().__init__()
158
+
159
+ assert is_power_of_2(degree) and degree >= 2, f"The degree {degree} must be a power of 2"
160
+
161
+ self.head_dim = head_dim
162
+ self.sketch_size = sketch_size if sketch_size is not None else head_dim
163
+ self.degree = degree
164
+
165
+ self.gamma = nn.Parameter(torch.ones(head_dim))
166
+ self.beta = nn.Parameter(torch.zeros(head_dim))
167
+ # NOTE: the sketch layers defined here are quite different from the original paper
168
+ # currently we simply use linear layers without any non-linear activations
169
+ self.sketches1 = nn.ModuleList([
170
+ nn.Linear(head_dim, sketch_size, bias=False),
171
+ *[nn.Linear(sketch_size, sketch_size, bias=False) for _ in range(int(math.log2(self.degree)) - 2)]
172
+ ])
173
+ self.sketches2 = nn.ModuleList([
174
+ nn.Linear(head_dim, sketch_size, bias=False),
175
+ *[nn.Linear(sketch_size, sketch_size, bias=False) for _ in range(int(math.log2(self.degree)) - 2)]
176
+ ])
177
+
178
+ def forward(self, x: torch.Tensor):
179
+ # Section 2.1
180
+ x = layer_norm(x, self.gamma, self.beta)
181
+ # first map the input to sketch size with learnable parameters
182
+ x = self.sketches1[0](x) * self.sketches2[0](x) * self.head_dim ** -0.5
183
+ for i in range(1, int(math.log2(self.degree)) - 1):
184
+ x = self.sketches1[i](x) * self.sketches2[i](x) * self.head_dim ** -0.5
185
+ # do sketch mapping for log2(p) - 1 times in total
186
+ # do p=2 mapping to ensure non-negativity
187
+ return flatten_diag_outer_product(x, x)
188
+
189
+
190
+ class TaylorFeatureMap(nn.Module):
191
+ def __init__(
192
+ self,
193
+ head_dim: int
194
+ ) -> TaylorFeatureMap:
195
+ super().__init__()
196
+ self.head_dim = head_dim
197
+ self.r2 = math.sqrt(2)
198
+ self.rd = math.sqrt(self.head_dim)
199
+ self.rrd = math.sqrt(self.rd)
200
+
201
+ def forward(self, x: torch.Tensor):
202
+ x2_1, x2_2 = flatten_diag_outer_product_off1(x, x)
203
+ return torch.cat([torch.ones_like(x[..., 0:1]), x / self.rrd, x2_2 / (self.rd * self.r2), x2_1 / self.rd], dim=-1)
204
+
205
+
206
+ class RebasedFeatureMap(nn.Module):
207
+
208
+ def __init__(
209
+ self,
210
+ head_dim: int,
211
+ use_gamma: Optional[bool] = True,
212
+ use_beta: Optional[bool] = True,
213
+ normalize: Optional[bool] = True
214
+ ) -> RebasedFeatureMap:
215
+ super().__init__()
216
+
217
+ self.head_dim = head_dim
218
+ self.use_gamma = use_gamma
219
+ self.use_beta = use_beta
220
+ self.normalize = normalize
221
+
222
+ self.gamma = None
223
+ self.beta = None
224
+ if use_gamma:
225
+ self.gamma = nn.Parameter(torch.ones(head_dim))
226
+ if use_beta:
227
+ self.beta = nn.Parameter(torch.zeros(head_dim))
228
+
229
+ def forward(self, x: torch.Tensor, flatten: Optional[bool] = True):
230
+ if self.use_beta and self.use_gamma and self.normalize:
231
+ x = layer_norm(x, self.gamma, self.beta)
232
+ elif self.normalize:
233
+ x = F.layer_norm(x, (self.head_dim,), self.gamma, self.beta)
234
+ elif self.use_gamma and self.use_beta:
235
+ x = torch.addcmul(self.beta, x, self.gamma)
236
+ elif self.use_gamma:
237
+ x = x.mul(self.gamma)
238
+ else:
239
+ raise RuntimeError(f"Not supported combination of `use_gamma`, `use_beta` and `normalize`, "
240
+ f"which is currentlt set as (`{self.use_gamma}`, `{self.use_beta}`, `{self.normalize}`)")
241
+ if not flatten:
242
+ return x
243
+ x2_1, x2_2 = flatten_diag_outer_product_off1(x, x)
244
+ # rebased use learnable parameters to approximate any quadratic function
245
+ return torch.cat([x2_2 * self.head_dim ** -0.5, x2_1 * (2 / self.head_dim) ** 0.5], dim=-1)
246
+
247
+
248
+ class ReLUFeatureMap(nn.Module):
249
+
250
+ def __init__(
251
+ self,
252
+ ) -> ReLUFeatureMap:
253
+ super().__init__()
254
+
255
+ def forward(self, x: torch.Tensor):
256
+ return F.relu(x)
257
+
258
+
259
+ class SquaredReLUFeatureMap(nn.Module):
260
+
261
+ def __init__(
262
+ self,
263
+ ) -> SquaredReLUFeatureMap:
264
+ super().__init__()
265
+
266
+ def forward(self, x: torch.Tensor):
267
+ return sqrelu(x)
268
+
269
+
270
+ class GELUFeatureMap(nn.Module):
271
+
272
+ def __init__(
273
+ self,
274
+ ) -> GELUFeatureMap:
275
+ super().__init__()
276
+
277
+ def forward(self, x: torch.Tensor):
278
+ return fast_gelu_impl(x)
279
+
280
+
281
+ class SwishFeatureMap(nn.Module):
282
+
283
+ def __init__(
284
+ self,
285
+ ) -> SwishFeatureMap:
286
+ super().__init__()
287
+
288
+ def forward(self, x: torch.Tensor):
289
+ return swish(x)
290
+
291
+
292
+ class SigmoidFeatureMap(nn.Module):
293
+
294
+ def __init__(
295
+ self,
296
+ ) -> SigmoidFeatureMap:
297
+ super().__init__()
298
+
299
+ def forward(self, x: torch.Tensor):
300
+ return sigmoid(x)
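Most of these feature maps change only the last dimension; TaylorFeatureMap, for instance, expands a head dimension d into 1 + 2d + d(d-1)/2 features (constant, first-order, diagonal and upper-triangular second-order terms). An illustrative sketch:

import torch

fm = TaylorFeatureMap(head_dim=16)
q = torch.randn(2, 4, 128, 16)                        # [B, H, T, D]
phi_q = fm(q)
assert phi_q.shape[-1] == 1 + 2 * 16 + 16 * 15 // 2   # 153 features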
fla/modules/fused_bitlinear.py ADDED
@@ -0,0 +1,638 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
3
+
4
+ # Implementations of BitLinear layer with fused LayerNorm and quantized Linear layer.
5
+ # [The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits](https://arxiv.org/abs/2402.17764)
6
+ # [Scalable MatMul-free Language Modeling](https://arxiv.org/abs/2406.02528)
7
+
8
+ # Code adapted from https://github.com/ridgerchu/matmulfreellm/
9
+
10
+ from __future__ import annotations
11
+
12
+ import math
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+ import torch.nn.functional as F
17
+ import triton
18
+ import triton.language as tl
19
+
20
+ from fla.modules.layernorm import RMSNorm
21
+ from fla.utils import get_multiprocessor_count, input_guard, require_version
22
+
23
+
24
+ def activation_quant(x):
25
+ """
26
+ Per-token quantization to 8 bits. No grouping is needed for quantization.
27
+
28
+ Args:
29
+ x: An activation tensor with shape [n, d].
30
+
31
+ Returns:
32
+ A quantized activation tensor with shape [n, d].
33
+ """
34
+ # Compute the scale factor
35
+ scale = 127.0 / x.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5)
36
+ # Quantize and then de-quantize the tensor
37
+ y = (x * scale).round().clamp_(-128, 127) / scale
38
+ return y
39
+
40
+
41
+ def weight_quant(w):
42
+ """
43
+ Per-tensor quantization to 1.58 bits. No grouping is needed for quantization.
44
+
45
+ Args:
46
+ w: A weight tensor with shape [d, k].
47
+
48
+ Returns:
49
+ A quantized weight tensor with shape [d, k].
50
+ """
51
+ # Compute the scale factor
52
+ scale = 1.0 / w.abs().mean().clamp_(min=1e-5)
53
+ # Quantize and then de-quantize the tensor
54
+ u = (w * scale).round().clamp_(-1, 1) / scale
55
+ return u
56
+
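A small illustrative example of what the two quantizers do: activations are fake-quantized per token to the int8 range, weights per tensor to the ternary values {-s, 0, +s} with s = mean(|w|); BitLinear below combines the two through a straight-through estimator.

import torch

w = torch.tensor([[0.8, -0.05, 0.3], [-0.6, 0.02, 0.9]])
w_q = weight_quant(w)          # ternary fake quantization, values in {-s, 0, +s}
x = torch.randn(4, 3)
x_q = activation_quant(x)      # per-row (per-token) 8-bit fake quantization
print(w_q, (x - x_q).abs().max())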
57
+
58
+ @triton.autotune(
59
+ configs=[
60
+ triton.Config({}, num_warps=1),
61
+ triton.Config({}, num_warps=2),
62
+ triton.Config({}, num_warps=4),
63
+ triton.Config({}, num_warps=8),
64
+ triton.Config({}, num_warps=16),
65
+ triton.Config({}, num_warps=32),
66
+ ],
67
+ key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS"],
68
+ )
69
+ @triton.jit
70
+ def layer_norm_fwd_kernel_quant(
71
+ X, # pointer to the input
72
+ Y, # pointer to the output
73
+ W, # pointer to the weights
74
+ B, # pointer to the biases
75
+ RESIDUAL, # pointer to the residual
76
+ RESIDUAL_OUT, # pointer to the residual
77
+ Mean, # pointer to the mean
78
+ Rstd, # pointer to the 1/std
79
+ stride_x_row, # how much to increase the pointer when moving by 1 row
80
+ stride_y_row,
81
+ stride_res_row,
82
+ stride_res_out_row,
83
+ N, # number of columns in X
84
+ eps, # epsilon to avoid division by zero
85
+ IS_RMS_NORM: tl.constexpr,
86
+ BLOCK_N: tl.constexpr,
87
+ HAS_RESIDUAL: tl.constexpr,
88
+ STORE_RESIDUAL_OUT: tl.constexpr,
89
+ HAS_WEIGHT: tl.constexpr,
90
+ HAS_BIAS: tl.constexpr
91
+ ):
92
+ # Map the program id to the row of X and Y it should compute.
93
+ row = tl.program_id(0)
94
+ X += row * stride_x_row
95
+ Y += row * stride_y_row
96
+ if HAS_RESIDUAL:
97
+ RESIDUAL += row * stride_res_row
98
+ if STORE_RESIDUAL_OUT:
99
+ RESIDUAL_OUT += row * stride_res_out_row
100
+ # Compute mean and variance
101
+ cols = tl.arange(0, BLOCK_N)
102
+ x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
103
+ if HAS_RESIDUAL:
104
+ residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32)
105
+ x += residual
106
+ if STORE_RESIDUAL_OUT:
107
+ tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
108
+ if not IS_RMS_NORM:
109
+ mean = tl.sum(x, axis=0) / N
110
+ tl.store(Mean + row, mean)
111
+ xbar = tl.where(cols < N, x - mean, 0.0)
112
+ var = tl.sum(xbar * xbar, axis=0) / N
113
+ else:
114
+ xbar = tl.where(cols < N, x, 0.0)
115
+ var = tl.sum(xbar * xbar, axis=0) / N
116
+ rstd = 1 / tl.sqrt(var + eps)
117
+ tl.store(Rstd + row, rstd)
118
+ # Normalize and apply linear transformation
119
+ mask = cols < N
120
+ if HAS_WEIGHT:
121
+ w = tl.load(W + cols, mask=mask).to(tl.float32)
122
+ if HAS_BIAS:
123
+ b = tl.load(B + cols, mask=mask).to(tl.float32)
124
+ x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
125
+
126
+ y = x_hat * w if HAS_WEIGHT else x_hat
127
+ if HAS_BIAS:
128
+ y = y + b
129
+
130
+ # Apply quantization to the output
131
+ scale = 127.0 / tl.maximum(tl.max(tl.abs(y), 0), 1e-5)
132
+ # Quantize and then de-quantize the tensor
133
+ y = tl.extra.cuda.libdevice.round(y * scale)
134
+ y = tl.maximum(tl.minimum(y, 127), -128) / scale
135
+
136
+ # Write output
137
+ tl.store(Y + cols, y, mask=mask)
138
+
139
+
140
+ def layer_norm_fwd_quant(
141
+ x: torch.Tensor,
142
+ weight: torch.Tensor,
143
+ bias: torch.Tensor,
144
+ eps: float,
145
+ residual: torch.Tensor = None,
146
+ out_dtype: torch.dtype = None,
147
+ residual_dtype: torch.dtype = None,
148
+ is_rms_norm: bool = False
149
+ ):
150
+ if residual is not None:
151
+ residual_dtype = residual.dtype
152
+ M, N = x.shape
153
+ # allocate output
154
+ y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype)
155
+ if residual is not None or (residual_dtype is not None and residual_dtype != x.dtype):
156
+ residual_out = torch.empty(M, N, device=x.device, dtype=residual_dtype)
157
+ else:
158
+ residual_out = None
159
+ mean = torch.empty((M,), dtype=torch.float32, device=x.device) if not is_rms_norm else None
160
+ rstd = torch.empty((M,), dtype=torch.float32, device=x.device)
161
+ # Less than 64KB per feature: enqueue fused kernel
162
+ MAX_FUSED_SIZE = 65536 // x.element_size()
163
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
164
+ if N > BLOCK_N:
165
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
166
+ # heuristics for number of warps
167
+ layer_norm_fwd_kernel_quant[(M,)](
168
+ x,
169
+ y,
170
+ weight,
171
+ bias,
172
+ residual,
173
+ residual_out,
174
+ mean,
175
+ rstd,
176
+ x.stride(0),
177
+ y.stride(0),
178
+ residual.stride(0) if residual is not None else 0,
179
+ residual_out.stride(0) if residual_out is not None else 0,
180
+ N,
181
+ eps,
182
+ is_rms_norm,
183
+ BLOCK_N,
184
+ residual is not None,
185
+ residual_out is not None,
186
+ weight is not None,
187
+ bias is not None,
188
+ )
189
+ # residual_out is None if residual is None and residual_dtype == input_dtype
190
+ return y, mean, rstd, residual_out if residual_out is not None else x
191
+
192
+
193
+ @triton.heuristics({
194
+ "RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None
195
+ })
196
+ @triton.autotune(
197
+ configs=[
198
+ triton.Config({}, num_warps=1),
199
+ triton.Config({}, num_warps=2),
200
+ triton.Config({}, num_warps=4),
201
+ triton.Config({}, num_warps=8),
202
+ triton.Config({}, num_warps=16),
203
+ triton.Config({}, num_warps=32),
204
+ ],
205
+ key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS"],
206
+ )
207
+ @triton.jit
208
+ def layer_norm_bwd_kernel(
209
+ X, # pointer to the input
210
+ W, # pointer to the weights
211
+ B, # pointer to the biases
212
+ Y, # pointer to the output to be recomputed
213
+ DY, # pointer to the output gradient
214
+ DX, # pointer to the input gradient
215
+ DW, # pointer to the partial sum of weights gradient
216
+ DB, # pointer to the partial sum of biases gradient
217
+ DRESIDUAL,
218
+ DRESIDUAL_IN,
219
+ Mean, # pointer to the mean
220
+ Rstd, # pointer to the 1/std
221
+ stride_x_row, # how much to increase the pointer when moving by 1 row
222
+ stride_y_row,
223
+ stride_dy_row,
224
+ stride_dx_row,
225
+ stride_dres_row,
226
+ stride_dres_in_row,
227
+ M, # number of rows in X
228
+ N, # number of columns in X
229
+ eps, # epsilon to avoid division by zero
230
+ rows_per_program,
231
+ IS_RMS_NORM: tl.constexpr,
232
+ BLOCK_N: tl.constexpr,
233
+ HAS_DRESIDUAL: tl.constexpr,
234
+ STORE_DRESIDUAL: tl.constexpr,
235
+ HAS_WEIGHT: tl.constexpr,
236
+ HAS_BIAS: tl.constexpr,
237
+ RECOMPUTE_OUTPUT: tl.constexpr,
238
+ ):
239
+ # Map the program id to the elements of X, DX, and DY it should compute.
240
+ row_block_id = tl.program_id(0)
241
+ row_start = row_block_id * rows_per_program
242
+ cols = tl.arange(0, BLOCK_N)
243
+ mask = cols < N
244
+ X += row_start * stride_x_row
245
+ if HAS_DRESIDUAL:
246
+ DRESIDUAL += row_start * stride_dres_row
247
+ if STORE_DRESIDUAL:
248
+ DRESIDUAL_IN += row_start * stride_dres_in_row
249
+ DY += row_start * stride_dy_row
250
+ DX += row_start * stride_dx_row
251
+ if RECOMPUTE_OUTPUT:
252
+ Y += row_start * stride_y_row
253
+ if HAS_WEIGHT:
254
+ w = tl.load(W + cols, mask=mask).to(tl.float32)
255
+ dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
256
+ if RECOMPUTE_OUTPUT and HAS_BIAS:
257
+ b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
258
+ if HAS_BIAS:
259
+ db = tl.zeros((BLOCK_N,), dtype=tl.float32)
260
+ row_end = min((row_block_id + 1) * rows_per_program, M)
261
+ for row in range(row_start, row_end):
262
+ # Load data to SRAM
263
+ x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
264
+ dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
265
+ if not IS_RMS_NORM:
266
+ mean = tl.load(Mean + row)
267
+ rstd = tl.load(Rstd + row)
268
+ # Compute dx
269
+ xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
270
+ xhat = tl.where(mask, xhat, 0.0)
271
+ if RECOMPUTE_OUTPUT:
272
+ y = xhat * w if HAS_WEIGHT else xhat
273
+ if HAS_BIAS:
274
+ y = y + b
275
+
276
+ # Apply quantization to the output
277
+ scale = 127.0 / tl.maximum(tl.max(tl.abs(y), 0), 1e-5)
278
+ # Quantize and then de-quantize the tensor
279
+ y = tl.extra.cuda.libdevice.round(y * scale)
280
+ y = tl.maximum(tl.minimum(y, 127), -128) / scale
281
+
282
+ tl.store(Y + cols, y, mask=mask)
283
+ wdy = dy
284
+ if HAS_WEIGHT:
285
+ wdy = dy * w
286
+ dw += dy * xhat
287
+ if HAS_BIAS:
288
+ db += dy
289
+ if not IS_RMS_NORM:
290
+ c1 = tl.sum(xhat * wdy, axis=0) / N
291
+ c2 = tl.sum(wdy, axis=0) / N
292
+ dx = (wdy - (xhat * c1 + c2)) * rstd
293
+ else:
294
+ c1 = tl.sum(xhat * wdy, axis=0) / N
295
+ dx = (wdy - xhat * c1) * rstd
296
+ if HAS_DRESIDUAL:
297
+ dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32)
298
+ dx += dres
299
+ # Write dx
300
+ if STORE_DRESIDUAL:
301
+ tl.store(DRESIDUAL_IN + cols, dx, mask=mask)
302
+ tl.store(DX + cols, dx, mask=mask)
303
+
304
+ X += stride_x_row
305
+ if HAS_DRESIDUAL:
306
+ DRESIDUAL += stride_dres_row
307
+ if STORE_DRESIDUAL:
308
+ DRESIDUAL_IN += stride_dres_in_row
309
+ if RECOMPUTE_OUTPUT:
310
+ Y += stride_y_row
311
+ DY += stride_dy_row
312
+ DX += stride_dx_row
313
+ if HAS_WEIGHT:
314
+ tl.store(DW + row_block_id * N + cols, dw, mask=mask)
315
+ if HAS_BIAS:
316
+ tl.store(DB + row_block_id * N + cols, db, mask=mask)
317
+
318
+
319
+ def layer_norm_bwd(
320
+ dy: torch.Tensor,
321
+ x: torch.Tensor,
322
+ weight: torch.Tensor,
323
+ bias: torch.Tensor,
324
+ eps: float,
325
+ mean: torch.Tensor,
326
+ rstd: torch.Tensor,
327
+ dresidual: torch.Tensor = None,
328
+ has_residual: bool = False,
329
+ is_rms_norm: bool = False,
330
+ x_dtype: torch.dtype = None,
331
+ recompute_output: bool = False,
332
+ ):
333
+ M, N = x.shape
334
+ # allocate output
335
+ dx = torch.empty_like(x) if x_dtype is None else torch.empty(M, N, dtype=x_dtype, device=x.device)
336
+ dresidual_in = torch.empty_like(x) if has_residual and dx.dtype != x.dtype else None
337
+ y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None
338
+
339
+ # Less than 64KB per feature: enqueue fused kernel
340
+ MAX_FUSED_SIZE = 65536 // x.element_size()
341
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
342
+ if N > BLOCK_N:
343
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
344
+ sm_count = get_multiprocessor_count(x.device.index)
345
+ _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device) if weight is not None else None
346
+ _db = torch.empty((sm_count, N), dtype=torch.float32, device=bias.device) if bias is not None else None
347
+ rows_per_program = math.ceil(M / sm_count)
348
+ grid = (sm_count,)
349
+ layer_norm_bwd_kernel[grid](
350
+ x,
351
+ weight,
352
+ bias,
353
+ y,
354
+ dy,
355
+ dx,
356
+ _dw,
357
+ _db,
358
+ dresidual,
359
+ dresidual_in,
360
+ mean,
361
+ rstd,
362
+ x.stride(0),
363
+ 0 if not recompute_output else y.stride(0),
364
+ dy.stride(0),
365
+ dx.stride(0),
366
+ dresidual.stride(0) if dresidual is not None else 0,
367
+ dresidual_in.stride(0) if dresidual_in is not None else 0,
368
+ M,
369
+ N,
370
+ eps,
371
+ rows_per_program,
372
+ is_rms_norm,
373
+ BLOCK_N,
374
+ dresidual is not None,
375
+ dresidual_in is not None,
376
+ weight is not None,
377
+ bias is not None,
378
+ )
379
+ dw = _dw.sum(0).to(weight.dtype) if weight is not None else None
380
+ db = _db.sum(0).to(bias.dtype) if bias is not None else None
381
+ # Don't need to compute dresidual_in separately in this case
382
+ if has_residual and dx.dtype == x.dtype:
383
+ dresidual_in = dx
384
+ return (dx, dw, db, dresidual_in) if not recompute_output else (dx, dw, db, dresidual_in, y)
385
+
386
+
387
+ class LayerNormLinearQuantFn(torch.autograd.Function):
388
+
389
+ @staticmethod
390
+ @input_guard
391
+ def forward(
392
+ ctx,
393
+ x,
394
+ norm_weight,
395
+ norm_bias,
396
+ linear_weight,
397
+ linear_bias,
398
+ residual=None,
399
+ eps=1e-6,
400
+ prenorm=False,
401
+ residual_in_fp32=False,
402
+ is_rms_norm=False,
403
+ ):
404
+ x_shape_og = x.shape
405
+ # reshape input data into 2D tensor
406
+ x = x.reshape(-1, x.shape[-1])
407
+ if residual is not None:
408
+ assert residual.shape == x_shape_og
409
+ residual = residual.reshape(-1, residual.shape[-1])
410
+ residual_dtype = residual.dtype if residual is not None else (torch.float32 if residual_in_fp32 else None)
411
+ y, mean, rstd, residual_out = layer_norm_fwd_quant(
412
+ x,
413
+ norm_weight,
414
+ norm_bias,
415
+ eps,
416
+ residual,
417
+ out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(),
418
+ residual_dtype=residual_dtype,
419
+ is_rms_norm=is_rms_norm,
420
+ )
421
+ y = y.reshape(x_shape_og)
422
+ dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype
423
+ linear_weight = weight_quant(linear_weight).to(dtype)
424
+ linear_bias = linear_bias.to(dtype) if linear_bias is not None else None
425
+ out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias)
426
+ # We don't store y, will be recomputed in the backward pass to save memory
427
+ ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd)
428
+ ctx.x_shape_og = x_shape_og
429
+ ctx.eps = eps
430
+ ctx.is_rms_norm = is_rms_norm
431
+ ctx.has_residual = residual is not None
432
+ ctx.prenorm = prenorm
433
+ ctx.x_dtype = x.dtype
434
+ ctx.linear_bias_is_none = linear_bias is None
435
+ return out if not prenorm else (out, residual_out.reshape(x_shape_og))
436
+
437
+ @staticmethod
438
+ @input_guard
439
+ def backward(ctx, dout, *args):
440
+ x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors
441
+ dout = dout.reshape(-1, dout.shape[-1])
442
+ dy = F.linear(dout, linear_weight.t())
443
+ dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
444
+ assert dy.shape == x.shape
445
+ if ctx.prenorm:
446
+ dresidual = args[0]
447
+ dresidual = dresidual.reshape(-1, dresidual.shape[-1])
448
+ assert dresidual.shape == x.shape
449
+ else:
450
+ dresidual = None
451
+ dx, dnorm_weight, dnorm_bias, dresidual_in, y = layer_norm_bwd(
452
+ dy,
453
+ x,
454
+ norm_weight,
455
+ norm_bias,
456
+ ctx.eps,
457
+ mean,
458
+ rstd,
459
+ dresidual,
460
+ ctx.has_residual,
461
+ ctx.is_rms_norm,
462
+ x_dtype=ctx.x_dtype,
463
+ recompute_output=True
464
+ )
465
+ dlinear_weight = torch.einsum("bo,bi->oi", dout, y)
466
+ return (
467
+ dx.reshape(ctx.x_shape_og),
468
+ dnorm_weight,
469
+ dnorm_bias,
470
+ dlinear_weight,
471
+ dlinear_bias,
472
+ dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
473
+ None,
474
+ None,
475
+ None,
476
+ None,
477
+ )
478
+
479
+
480
+ def layer_norm_linear_quant_fn(
481
+ x,
482
+ norm_weight,
483
+ norm_bias,
484
+ linear_weight,
485
+ linear_bias,
486
+ residual=None,
487
+ eps=1e-6,
488
+ prenorm=False,
489
+ residual_in_fp32=False,
490
+ is_rms_norm=False,
491
+ ):
492
+ return LayerNormLinearQuantFn.apply(
493
+ x,
494
+ norm_weight,
495
+ norm_bias,
496
+ linear_weight,
497
+ linear_bias,
498
+ residual,
499
+ eps,
500
+ prenorm,
501
+ residual_in_fp32,
502
+ is_rms_norm,
503
+ )
504
+
505
+
506
+ def rms_norm_linear_quant(
507
+ x: torch.Tensor,
508
+ norm_weight: torch.Tensor,
509
+ norm_bias: torch.Tensor,
510
+ linear_weight: torch.Tensor,
511
+ linear_bias: torch.Tensor,
512
+ residual: torch.Tensor = None,
513
+ eps: float = 1e-5,
514
+ prenorm: bool = False,
515
+ residual_in_fp32: bool = False
516
+ ):
517
+ return layer_norm_linear_quant_fn(
518
+ x=x,
519
+ norm_weight=norm_weight,
520
+ norm_bias=norm_bias,
521
+ linear_weight=linear_weight,
522
+ linear_bias=linear_bias,
523
+ residual=residual,
524
+ eps=eps,
525
+ prenorm=prenorm,
526
+ residual_in_fp32=residual_in_fp32,
527
+ is_rms_norm=True
528
+ )
529
+
530
+
531
+ @require_version("triton>=3.0", "Triton >= 3.0 is required to do online quantization.")
532
+ def bit_linear(x, weight, bias=None, norm_weight=None, norm_bias=None, eps=1e-8):
533
+ """
534
+ A functional version of BitLinear that applies quantization to activations and weights.
535
+
536
+ Args:
537
+ x: Input tensor with shape [n, d].
538
+ weight: Weight tensor with shape [out_features, in_features].
539
+ bias: Bias tensor with shape [out_features] (optional).
540
+ norm_weight: Weight tensor for RMS normalization with shape [in_features].
541
+ norm_bias: Bias tensor for RMS normalization with shape [in_features].
542
+ eps: A small constant for numerical stability in normalization.
543
+
544
+ Returns:
545
+ Output tensor with shape [n, out_features].
546
+ """
547
+ return layer_norm_linear_quant_fn(
548
+ x,
549
+ norm_weight,
550
+ norm_bias,
551
+ weight,
552
+ bias,
553
+ is_rms_norm=True
554
+ )
555
+
556
+
557
+ class BitLinear(nn.Linear):
558
+ """
559
+ A custom linear layer that applies quantization on both activations and weights.
560
+ This is primarily for training; kernel optimization is needed for efficiency in deployment.
561
+ """
562
+
563
+ def __init__(
564
+ self,
565
+ in_features: int,
566
+ out_features: int,
567
+ bias: bool = False,
568
+ norm_eps: float = 1e-8
569
+ ):
570
+ """
571
+ Initializes the BitLinear layer.
572
+
573
+ Args:
574
+ in_features: Size of each input sample.
575
+ out_features: Size of each output sample.
576
+ bias: If set to False, the layer will not learn an additive bias. Default: False.
577
+ """
578
+ # Initialize the superclass nn.Linear with the given parameters
579
+ super(BitLinear, self).__init__(in_features, out_features, bias=bias)
580
+
581
+ self.norm = RMSNorm(in_features, eps=norm_eps)
582
+
583
+ def __repr__(self) -> str:
584
+ return f"{self.__class__.__name__}({super().extra_repr()}, norm_eps={self.norm.eps})"
585
+
586
+ def forward(self, x):
587
+ """
588
+ Overrides the forward pass to include quantization.
589
+
590
+ Args:
591
+ x: An input tensor with shape [n, d].
592
+
593
+ Returns:
594
+ An output tensor with shape [n, d].
595
+ """
596
+ # Weight tensor
597
+ w = self.weight
598
+
599
+ # Apply RMS normalization to the input
600
+ x_norm = self.norm(x)
601
+
602
+ # Apply quantization to both activations and weights
603
+ # Uses Straight-Through Estimator (STE) trick with .detach() for gradient flow
604
+ x_quant = x_norm + (activation_quant(x_norm) - x_norm).detach()
605
+ w_quant = w + (weight_quant(w) - w).detach()
606
+ # Perform linear operation with quantized values
607
+ y = F.linear(x_quant, w_quant)
608
+
609
+ return y
610
+
611
+
612
+ class FusedBitLinear(BitLinear):
613
+ """
614
+ A custom linear layer that applies quantization on both activations and weights.
615
+ This is primarily for training; kernel optimization is needed for efficiency in deployment.
616
+ """
617
+
618
+ def __init__(self, in_features, out_features, bias=False):
619
+ """
620
+ Initializes the BitLinear layer.
621
+
622
+ Args:
623
+ in_features: Size of each input sample.
624
+ out_features: Size of each output sample.
625
+ bias: If set to False, the layer will not learn an additive bias. Default: False.
626
+ """
627
+ # Initialize the superclass nn.Linear with the given parameters
628
+ super(FusedBitLinear, self).__init__(in_features, out_features, bias=bias)
629
+
630
+ def forward(self, x):
631
+ return layer_norm_linear_quant_fn(
632
+ x,
633
+ self.norm.weight,
634
+ self.norm.bias,
635
+ self.weight,
636
+ self.bias,
637
+ is_rms_norm=True
638
+ )
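For orientation, here is a minimal usage sketch of the BitLinear and FusedBitLinear layers defined above. It is not part of the diff; the shapes are illustrative, and a CUDA device with Triton available is assumed since both the RMSNorm and the fused quantized path run Triton kernels.

import torch
from fla.modules.fused_bitlinear import BitLinear, FusedBitLinear

x = torch.randn(4, 1024, device='cuda')

# Unfused variant: RMSNorm, then fake-quantized activations/weights via the STE trick, then F.linear
naive = BitLinear(1024, 2048).cuda()
y_naive = naive(x)    # [4, 2048]

# Fused variant: norm + quantization + matmul handled by layer_norm_linear_quant_fn in one pass
fused = FusedBitLinear(1024, 2048).cuda()
y_fused = fused(x)    # [4, 2048]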
fla/modules/fused_cross_entropy.py ADDED
@@ -0,0 +1,419 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright (c) 2023, Tri Dao.
4
+
5
+ from typing import Any, Tuple
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import triton
10
+ import triton.language as tl
11
+
12
+ from fla.ops.utils.op import exp, log
13
+ from fla.utils import input_guard
14
+
15
+ # `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
16
+ # `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
17
+ # version of PyTorch. The following 2 lines are for backward compatibility with
18
+ # older PyTorch.
19
+ if "all_gather_into_tensor" not in dir(torch.distributed):
20
+ torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
21
+
22
+
23
+ @triton.heuristics({
24
+ "HAS_SMOOTHING": lambda args: args["label_smoothing"] > 0.0,
25
+ })
26
+ @triton.jit
27
+ def cross_entropy_fwd_kernel(
28
+ loss_ptr, # data ptrs
29
+ lse_ptr,
30
+ z_loss_ptr,
31
+ logits_ptr,
32
+ labels_ptr,
33
+ label_smoothing,
34
+ logit_scale,
35
+ lse_square_scale,
36
+ ignore_index,
37
+ total_classes,
38
+ class_start_idx, # Useful for tensor parallel when each rank only has a subset of classes
39
+ n_cols, # shapes
40
+ n_rows,
41
+ logits_row_stride, # strides
42
+ BLOCK_SIZE: tl.constexpr,
43
+ HAS_SMOOTHING: tl.constexpr,
44
+ # if SPLIT (e.g. tensor parallel), don't include the LSE in the loss since it's not the final LSE
45
+ SPLIT: tl.constexpr,
46
+ ):
47
+ row_idx = tl.program_id(0)
48
+ col_block_idx = tl.program_id(1)
49
+ logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64)
50
+ col_offsets = col_block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
51
+ label_idx = tl.load(labels_ptr + row_idx)
52
+ logits = tl.load(logits_ptr + col_offsets, mask=col_offsets < n_cols, other=-float("inf"))
53
+ logits = logits.to(tl.float32) * logit_scale
54
+ max_logits = tl.max(logits, 0)
55
+ if HAS_SMOOTHING:
56
+ sum_logits = tl.sum(tl.where(col_offsets < n_cols, logits, 0.0), 0)
57
+ lse = log(tl.sum(exp(logits - max_logits), 0)) + max_logits
58
+ tl.store(lse_ptr + col_block_idx * n_rows + row_idx, lse)
59
+ if label_idx == ignore_index:
60
+ loss = 0.0
61
+ z_loss = 0.0
62
+ else:
63
+ label_idx -= class_start_idx
64
+ if label_idx >= col_block_idx * BLOCK_SIZE and label_idx < min(
65
+ n_cols, (col_block_idx + 1) * BLOCK_SIZE
66
+ ):
67
+ logits_label = tl.load(logits_ptr + label_idx) * logit_scale
68
+ if HAS_SMOOTHING:
69
+ loss = (
70
+ (lse if not SPLIT else 0.0)
71
+ - label_smoothing * sum_logits / total_classes
72
+ - (1 - label_smoothing) * logits_label
73
+ )
74
+ else:
75
+ loss = (lse if not SPLIT else 0.0) - logits_label
76
+ else:
77
+ # If label is out of bounds, we set the CE loss to 0.0. But we still want the label_smoothing loss
78
+ if HAS_SMOOTHING:
79
+ loss = label_smoothing * ((lse if not SPLIT else 0.0) - sum_logits / total_classes)
80
+ else:
81
+ loss = 0.0
82
+ if not SPLIT:
83
+ z_loss = lse_square_scale * lse * lse
84
+ loss += z_loss
85
+ else:
86
+ z_loss = 0.0
87
+ tl.store(loss_ptr + col_block_idx * n_rows + row_idx, loss)
88
+ if not SPLIT:
89
+ tl.store(z_loss_ptr + col_block_idx * n_rows + row_idx, z_loss)
90
+
91
+
92
+ @triton.heuristics({
93
+ "HAS_SMOOTHING": lambda args: args["label_smoothing"] > 0.0,
94
+ })
95
+ @triton.jit
96
+ def cross_entropy_bwd_kernel(
97
+ dlogits_ptr, # data ptrs
98
+ dloss_ptr,
99
+ logits_ptr,
100
+ lse_ptr,
101
+ labels_ptr,
102
+ label_smoothing,
103
+ logit_scale,
104
+ lse_square_scale,
105
+ ignore_index,
106
+ total_classes,
107
+ class_start_idx, # Useful for tensor parallel when each rank only has a subset of classes
108
+ n_cols, # shapes
109
+ logits_row_stride, # strides
110
+ dlogits_row_stride,
111
+ dloss_row_stride,
112
+ BLOCK_SIZE: tl.constexpr,
113
+ HAS_SMOOTHING: tl.constexpr,
114
+ ):
115
+ row_idx = tl.program_id(0)
116
+ col_block_idx = tl.program_id(1)
117
+ logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64)
118
+ dlogits_ptr = dlogits_ptr + row_idx * dlogits_row_stride.to(tl.int64)
119
+ col_offsets = col_block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
120
+ label_idx = tl.load(labels_ptr + row_idx)
121
+ if label_idx != ignore_index:
122
+ dloss = tl.load(dloss_ptr + row_idx * dloss_row_stride)
123
+ else:
124
+ dloss = 0.0
125
+ logits = tl.load(logits_ptr + col_offsets, mask=col_offsets < n_cols, other=-float("inf")).to(
126
+ tl.float32
127
+ ) * logit_scale
128
+ lse = tl.load(lse_ptr + row_idx)
129
+ probs = exp(logits - lse)
130
+ probs += 2.0 * lse_square_scale * lse * probs
131
+ label_idx -= class_start_idx
132
+ if HAS_SMOOTHING:
133
+ smooth_negative = label_smoothing / total_classes
134
+ probs = tl.where(col_offsets == label_idx, probs - (1 - label_smoothing), probs) - smooth_negative
135
+ else:
136
+ probs = tl.where(col_offsets == label_idx, probs - 1.0, probs)
137
+ tl.store(dlogits_ptr + col_offsets, (dloss * logit_scale) * probs, mask=col_offsets < n_cols)
138
+
139
+
140
+ def fused_cross_entropy_forward(
141
+ logits: torch.Tensor,
142
+ target: torch.Tensor,
143
+ label_smoothing: float = 0.0,
144
+ logit_scale: float = 1.0,
145
+ lse_square_scale: float = 0.0,
146
+ ignore_index: int = -100,
147
+ process_group=None,
148
+ ):
149
+ n_rows, n_cols = logits.shape
150
+ assert target.shape == (n_rows,)
151
+ world_size = 1 if process_group is None else torch.distributed.get_world_size(process_group)
152
+ total_classes = world_size * n_cols
153
+ rank = 0 if process_group is None else torch.distributed.get_rank(process_group)
154
+ class_start_idx = rank * n_cols
155
+
156
+ if logits.stride(-1) != 1:
157
+ logits = logits.contiguous()
158
+ # Set these similarly to https://github.com/openai/triton/blob/main/python/tutorials/02-fused-softmax.py
159
+ MAX_BLOCK_SIZE = 64 * 1024
160
+ BLOCK_SIZE = min(triton.next_power_of_2(n_cols), MAX_BLOCK_SIZE)
161
+ num_warps = (
162
+ 4
163
+ if BLOCK_SIZE < 2048
164
+ else (8 if BLOCK_SIZE < 8192 else (16 if BLOCK_SIZE < 128 * 1024 else 32))
165
+ )
166
+ # We may split the lse computation across multiple blocks, then do a reduction
167
+ # lse(local_lse) to get the final LSE. This is faster for large n_cols (e.g., > 64k)
168
+ # where having just one thread block processing more than 64k elements is slow.
169
+ split = world_size > 1 or n_cols > MAX_BLOCK_SIZE
170
+ n_splits = (n_cols + BLOCK_SIZE - 1) // BLOCK_SIZE
171
+ loss_shape = (n_splits, n_rows) if n_splits > 1 else (n_rows,)
172
+ losses = torch.empty(*loss_shape, dtype=torch.float, device=logits.device)
173
+ lse = torch.empty(*loss_shape, dtype=torch.float, device=logits.device)
174
+ z_losses = torch.empty(*loss_shape, dtype=torch.float, device=logits.device)
175
+
176
+ cross_entropy_fwd_kernel[(n_rows, n_splits)](
177
+ losses, # data ptrs
178
+ lse,
179
+ z_losses,
180
+ logits,
181
+ target,
182
+ label_smoothing,
183
+ logit_scale,
184
+ lse_square_scale,
185
+ ignore_index,
186
+ total_classes,
187
+ class_start_idx,
188
+ n_cols, # shapes
189
+ n_rows,
190
+ logits.stride(0), # strides
191
+ BLOCK_SIZE=BLOCK_SIZE, # constants
192
+ num_warps=num_warps,
193
+ SPLIT=split
194
+ )
195
+
196
+ if split:
197
+ # If there's no label_smoothing and the target is in the vocab of this partition, losses contains
198
+ # - predicted logit, and 0 otherwise.
199
+ # If there's label_smoothing=0.1, for target in the vocab of this partition, losses contains
200
+ # -0.9 * predicted logit - 0.1 * sum logit / total_classes.
201
+ # For target not in the vocab of this partition, losses contains
202
+ # -0.1 * sum logit / total_classes.
203
+ if n_splits > 1:
204
+ lse = torch.logsumexp(lse, dim=0)
205
+ losses = losses.sum(dim=0)
206
+ if world_size > 1:
207
+ lse_allgather = torch.empty(world_size, n_rows, dtype=lse.dtype, device=lse.device)
208
+ torch.distributed.all_gather_into_tensor(lse_allgather, lse, group=process_group)
209
+ handle_losses = torch.distributed.all_reduce(
210
+ losses, op=torch.distributed.ReduceOp.SUM, group=process_group, async_op=True
211
+ )
212
+ lse = torch.logsumexp(lse_allgather, dim=0)
213
+ handle_losses.wait()
214
+ # After the allreduce, if there's no label_smoothing, the total losses are - predicted_logit,
215
+ # we just have to add the (global) lse.
216
+ # If there's label_smoothing=0.1, the total losses are
217
+ # -0.9 * predicted_logit - 0.1 * sum logit / total_classes.
218
+ # Again, we just have to add the (global) lse.
219
+ losses += lse
220
+ if lse_square_scale != 0.0:
221
+ z_losses = lse_square_scale * lse.square()
222
+ z_losses.masked_fill_(target == ignore_index, 0.0)
223
+ losses += z_losses
224
+ else:
225
+ z_losses = torch.zeros_like(losses)
226
+ losses.masked_fill_(target == ignore_index, 0.0)
227
+
228
+ return losses, z_losses, lse, total_classes, class_start_idx
229
+
230
+
231
+ class CrossEntropyLossFunction(torch.autograd.Function):
232
+
233
+ @staticmethod
234
+ @input_guard
235
+ def forward(
236
+ ctx,
237
+ logits,
238
+ target,
239
+ label_smoothing=0.0,
240
+ logit_scale=1.0,
241
+ lse_square_scale=0.0,
242
+ ignore_index=-100,
243
+ inplace_backward=False,
244
+ process_group=None,
245
+ ):
246
+ losses, z_losses, lse, total_classes, class_start_idx = fused_cross_entropy_forward(
247
+ logits,
248
+ target,
249
+ label_smoothing,
250
+ logit_scale,
251
+ lse_square_scale,
252
+ ignore_index,
253
+ process_group,
254
+ )
255
+ ctx.save_for_backward(logits, lse, target)
256
+ ctx.mark_non_differentiable(z_losses)
257
+ ctx.label_smoothing = label_smoothing
258
+ ctx.logit_scale = logit_scale
259
+ ctx.lse_square_scale = lse_square_scale
260
+ ctx.ignore_index = ignore_index
261
+ ctx.total_classes = total_classes
262
+ ctx.class_start_idx = class_start_idx
263
+ ctx.inplace_backward = inplace_backward
264
+
265
+ return losses, z_losses
266
+
267
+ @staticmethod
268
+ @input_guard
269
+ def backward(ctx, grad_losses, grad_z_losses):
270
+ del grad_z_losses # z_losses are only for logging.
271
+
272
+ logits, lse, target = ctx.saved_tensors
273
+ dlogits = logits if ctx.inplace_backward else torch.empty_like(logits)
274
+ n_rows, n_cols = logits.shape
275
+ BLOCK_SIZE = min(triton.next_power_of_2(n_cols), 4 * 1024)
276
+ num_warps = 4 if BLOCK_SIZE < 2048 else (8 if BLOCK_SIZE < 8192 else 16)
277
+ def grid(META): return (n_rows, triton.cdiv(n_cols, META["BLOCK_SIZE"])) # noqa
278
+ cross_entropy_bwd_kernel[grid](
279
+ dlogits, # data ptrs
280
+ grad_losses,
281
+ logits,
282
+ lse,
283
+ target,
284
+ ctx.label_smoothing,
285
+ ctx.logit_scale,
286
+ ctx.lse_square_scale,
287
+ ctx.ignore_index,
288
+ ctx.total_classes,
289
+ ctx.class_start_idx,
290
+ n_cols, # shapes
291
+ logits.stride(0), # strides
292
+ dlogits.stride(0),
293
+ grad_losses.stride(0),
294
+ BLOCK_SIZE=BLOCK_SIZE, # constants
295
+ num_warps=num_warps,
296
+ )
297
+ return dlogits, None, None, None, None, None, None, None, None
298
+
299
+
300
+ def cross_entropy_loss(
301
+ logits: torch.Tensor,
302
+ target: torch.Tensor,
303
+ label_smoothing: float = 0.0,
304
+ logit_scale: float = 1.0,
305
+ lse_square_scale: float = 0.0,
306
+ ignore_index=-100,
307
+ inplace_backward: bool = False,
308
+ process_group=None,
309
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
310
+ """
311
+ Arguments:
312
+ logits: [batch, vocab_size]
313
+ target: [batch,]
314
+ label_smoothing: float
315
+ logit_scale: float.
316
+ Multiply logits by this scale before calculating the loss.
317
+ lse_square_scale: float.
318
+ If > 0, we add lse_square_scale * lse(logits) ^ 2 to the loss.
319
+ This is also referred to as "z-loss".
320
+ ignore_index: int.
321
+ If target == ignore_index, the loss is set to 0.0.
322
+ inplace_backward: bool.
323
+ If True, we do the backward pass in-place by modifying the logits.
324
+ This saves memory.
325
+ process_group:
326
+ if not None, we're doing Tensor Parallel: each process is responsible for
327
+ one part of the vocab. The loss will be aggregated across processes.
328
+ Returns:
329
+ losses: [batch,], float
330
+ z_losses: [batch,], float
331
+ """
332
+ return CrossEntropyLossFunction.apply(
333
+ logits,
334
+ target,
335
+ label_smoothing,
336
+ logit_scale,
337
+ lse_square_scale,
338
+ ignore_index,
339
+ inplace_backward,
340
+ process_group,
341
+ )
342
+
343
+
344
+ class FusedCrossEntropyLoss(nn.Module):
345
+ def __init__(
346
+ self,
347
+ ignore_index: int = -100,
348
+ reduction: str = "mean",
349
+ label_smoothing: float = 0.0,
350
+ logit_scale: float = 1.0,
351
+ lse_square_scale: float = 0.0,
352
+ inplace_backward: bool = False,
353
+ process_group: Any = None,
354
+ return_z_loss: bool = False,
355
+ ):
356
+ """
357
+ Arguments:
358
+ ignore_index: int. If target == ignore_index, the loss is set to 0.0.
359
+ label_smoothing: float
360
+ lse_square_scale: float. If > 0, we add lse_square_scale * lse(logits) ^ 2 to the loss.
361
+ This is also referred to as "z-loss".
362
+ inplace_backward: bool. If True, we do the backward pass in-place by modifying the logits.
363
+ This saves memory.
364
+ process_group: if not None, we're doing Tensor Parallel: each process is responsible for
365
+ one part of the vocab. The loss will be aggregated across processes.
366
+ return_z_loss: bool. If True, we return the component of the loss contributed by
367
+ the lse_square_scale value. This value is only for logging and does not support
368
+ backprop.
369
+ """
370
+ super().__init__()
371
+ if reduction not in ["mean", "none", "sum"]:
372
+ raise NotImplementedError("Only support reduction = 'mean' or 'none' or 'sum'")
373
+ self.ignore_index = ignore_index
374
+ self.reduction = reduction
375
+ self.label_smoothing = label_smoothing
376
+ self.logit_scale = logit_scale
377
+ self.lse_square_scale = lse_square_scale
378
+ self.inplace_backward = inplace_backward
379
+ self.process_group = process_group
380
+ self.return_z_loss = return_z_loss
381
+
382
+ def forward(self, input, target):
383
+ """
384
+ Arguments:
385
+ input: (batch, vocab_size)
386
+ target: (batch,)
387
+ Returns:
388
+ losses: (batch,) if reduction is 'none', else (1,), dtype float
389
+ z_loss: (batch,) if reduction is 'none', else (1,), dtype float (if self.return_z_loss)
390
+ """
391
+ assert input.is_cuda and target.is_cuda, "Only support CUDA tensors"
392
+ loss, z_loss = cross_entropy_loss(
393
+ input,
394
+ target,
395
+ label_smoothing=self.label_smoothing,
396
+ logit_scale=self.logit_scale,
397
+ lse_square_scale=self.lse_square_scale,
398
+ ignore_index=self.ignore_index,
399
+ inplace_backward=self.inplace_backward,
400
+ process_group=self.process_group,
401
+ )
402
+ if self.reduction == "mean":
403
+ loss = loss.sum() / (target != self.ignore_index).sum()
404
+ elif self.reduction == "sum":
405
+ loss = loss.sum()
406
+ else:
407
+ loss = loss
408
+
409
+ if not self.return_z_loss:
410
+ return loss
411
+
412
+ if self.reduction == "mean":
413
+ z_loss = z_loss.sum() / (target != self.ignore_index).sum()
414
+ elif self.reduction == "sum":
415
+ z_loss = z_loss.sum()
416
+ else:
417
+ z_loss = z_loss
418
+
419
+ return loss, z_loss
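For orientation, a minimal usage sketch of the loss module defined above (not part of the diff; shapes are illustrative and CUDA tensors are assumed, as required by the assert in forward):

import torch
from fla.modules.fused_cross_entropy import FusedCrossEntropyLoss

batch, vocab = 8, 32000
logits = torch.randn(batch, vocab, device='cuda', requires_grad=True)
labels = torch.randint(0, vocab, (batch,), device='cuda')
labels[0] = -100  # ignored position, excluded from the mean

criterion = FusedCrossEntropyLoss(
    ignore_index=-100,
    label_smoothing=0.1,
    lse_square_scale=1e-4,  # adds the auxiliary z-loss term
    return_z_loss=True,
)
loss, z_loss = criterion(logits, labels)
loss.backward()  # z_loss is for logging only and does not support backprop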
fla/modules/fused_kl_div.py ADDED
@@ -0,0 +1,323 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from typing import Tuple
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ import triton
9
+ import triton.language as tl
10
+
11
+ from fla.ops.utils.op import exp, log
12
+ from fla.utils import input_guard
13
+
14
+ # The hard limit of TRITON_MAX_TENSOR_NUMEL is 1048576
15
+ # https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/language/core.py#L19
16
+ # However, setting limit as 65536 as in LayerNorm tutorial is faster because of less register spilling
17
+ # The optimal maximum block size depends on your hardware, your kernel, and your dtype
18
+ MAX_FUSED_SIZE = 65536 // 2
19
+
20
+
21
+ @triton.jit
22
+ def kl_div_kernel(
23
+ logits,
24
+ target_logits,
25
+ loss,
26
+ s_logits,
27
+ s_loss,
28
+ reduction: tl.constexpr,
29
+ N: tl.constexpr,
30
+ V: tl.constexpr,
31
+ BV: tl.constexpr
32
+ ):
33
+ # https://github.com/triton-lang/triton/issues/1058
34
+ # If N*V is too large, i_n * stride will overflow out of int32, so we convert to int64
35
+ i_n = tl.program_id(0).to(tl.int64)
36
+
37
+ logits += i_n * s_logits
38
+ target_logits += i_n * s_logits
39
+
40
+ # m is the max value. use the notation from the paper
41
+ sm = float('-inf')
42
+ tm = float('-inf')
43
+ # d is the sum. use the notation from the paper
44
+ sd, td = 0.0, 0.0
45
+
46
+ NV = tl.cdiv(V, BV)
47
+ for iv in range(0, NV):
48
+ o_x = iv * BV + tl.arange(0, BV)
49
+ # for student
50
+ b_sl = tl.load(logits + o_x, mask=o_x < V, other=float('-inf'))
51
+ b_sm = tl.max(b_sl)
52
+ m_new = tl.maximum(sm, b_sm)
53
+ sd = sd * exp(sm - m_new) + tl.sum(exp(b_sl - m_new))
54
+ sm = m_new
55
+ # for teacher
56
+ b_tl = tl.load(target_logits + o_x, mask=o_x < V, other=float('-inf'))
57
+ b_tm = tl.max(b_tl)
58
+ m_new = tl.maximum(tm, b_tm)
59
+ td = td * exp(tm - m_new) + tl.sum(exp(b_tl - m_new))
60
+ tm = m_new
61
+
62
+ b_loss = 0.
63
+ # KL(y_true || y) = exp(y_true) * (log(y_true) - log(y))
64
+ for iv in range(0, NV):
65
+ o_x = iv * BV + tl.arange(0, BV)
66
+ b_sl = tl.load(logits + o_x, mask=o_x < V, other=float('-inf'))
67
+ b_tl = tl.load(target_logits + o_x, mask=o_x < V, other=float('-inf'))
68
+ b_sp_log = b_sl - sm - log(sd)
69
+ b_tp_log = b_tl - tm - log(td)
70
+ b_sp = exp(b_sp_log)
71
+ b_tp = exp(b_tp_log)
72
+ b_kl = tl.where(o_x < V, b_tp * (b_tp_log - b_sp_log), 0)
73
+ b_dl = -b_tp + b_sp
74
+ b_loss += tl.sum(b_kl)
75
+ if reduction == 'batchmean':
76
+ b_dl = b_dl / N
77
+ tl.store(logits + o_x, b_dl, mask=o_x < V)
78
+
79
+ # Normalize the loss by the number of elements if reduction is 'batchmean'
80
+ if reduction == 'batchmean':
81
+ b_loss = b_loss / N
82
+
83
+ tl.store(loss + i_n * s_loss, b_loss)
84
+
85
+
86
+ @triton.jit
87
+ def elementwise_mul_kernel(
88
+ x,
89
+ g,
90
+ N: tl.constexpr,
91
+ B: tl.constexpr
92
+ ):
93
+ """
94
+ This function multiplies each element of the tensor pointed by x with the value pointed by g.
95
+ The multiplication is performed in-place on the tensor pointed by x.
96
+
97
+ Parameters:
98
+ x:
99
+ Pointer to the input tensor.
100
+ g:
101
+ Pointer to the gradient output value.
102
+ N (int):
103
+ The total number of elements in the input tensor.
104
+ B (int):
105
+ The block size for Triton operations.
106
+ """
107
+
108
+ # Get the program ID and convert it to int64 to avoid overflow
109
+ i_x = tl.program_id(0).to(tl.int64)
110
+ o_x = i_x * B + tl.arange(0, B)
111
+
112
+ # Load the gradient output value
113
+ b_g = tl.load(g)
114
+ b_x = tl.load(x + o_x, mask=o_x < N)
115
+ tl.store(x + o_x, b_x * b_g, mask=o_x < N)
116
+
117
+
118
+ def fused_kl_div_forward(
119
+ x: torch.Tensor,
120
+ target_x: torch.Tensor,
121
+ weight: torch.Tensor,
122
+ target_weight: torch.Tensor,
123
+ reduction: str = 'batchmean'
124
+ ):
125
+ device = x.device
126
+
127
+ # ideally, we would like to achieve the same memory consumption as [N, H],
128
+ # so the expected chunk size should be:
129
+ # NC = ceil(V / H)
130
+ # C = ceil(N / NC)
131
+ # for ex: N = 4096*4, V = 32000, H = 4096 ==> NC = 8, C = ceil(N / NC) = 2048
132
+ N, H, V = *x.shape, weight.shape[0]
133
+ BV = min(MAX_FUSED_SIZE, triton.next_power_of_2(V))
134
+ # TODO: in real cases, we may need to limit the number of chunks NC to
135
+ # ensure the precisions of accumulated gradients
136
+ NC = min(8, triton.cdiv(V, H))
137
+ C = triton.next_power_of_2(triton.cdiv(N, NC))
138
+ NC = triton.cdiv(N, C)
139
+
140
+ dx = torch.zeros_like(x, device=device)
141
+ dw = torch.zeros_like(weight, device=device) if weight is not None else None
142
+ # we use fp32 for loss accumulator
143
+ loss = torch.zeros(N, dtype=torch.float32, device=device)
144
+
145
+ for ic in range(NC):
146
+ start, end = ic * C, min((ic + 1) * C, N)
147
+ # [C, H]
148
+ c_sx = x[start:end]
149
+ c_tx = target_x[start:end]
150
+ # when doing matmul, use the original precision
151
+ # [C, V]
152
+ c_sl = F.linear(c_sx, weight)
153
+ c_tl = F.linear(c_tx, target_weight)
154
+
155
+ # unreduced loss
156
+ c_loss = loss[start:end]
157
+
158
+ # Here we calculate the gradient of c_sx in place so we can save memory.
159
+ kl_div_kernel[(c_sx.shape[0],)](
160
+ logits=c_sl,
161
+ target_logits=c_tl,
162
+ loss=c_loss,
163
+ s_logits=c_sl.stride(-2),
164
+ s_loss=c_loss.stride(-1),
165
+ reduction=reduction,
166
+ N=N,
167
+ V=V,
168
+ BV=BV,
169
+ num_warps=32
170
+ )
171
+
172
+ # gradient of logits is computed in-place by the above triton kernel and is of shape: C x V
173
+ # thus dx[start: end] should be of shape: C x H
174
+ # additionally, since we are chunking the inputs, observe that the loss and gradients are calculated only
175
+ # on `n_non_ignore` tokens. However, the gradient of the input should be calculated for all tokens.
176
+ # Thus, we need an additional scaling factor of (n_non_ignore/total) to scale the gradients.
177
+ # [C, H]
178
+
179
+ dx[start:end] = torch.mm(c_sl, weight)
180
+
181
+ if weight is not None:
182
+ torch.addmm(input=dw, mat1=c_sl.t(), mat2=c_sx, out=dw)
183
+
184
+ loss = loss.sum()
185
+ return loss, dx, dw
186
+
187
+
188
+ def fused_kl_div_backward(
189
+ do: torch.Tensor,
190
+ dx: torch.Tensor,
191
+ dw: torch.Tensor
192
+ ):
193
+ # If cross entropy is the last layer, do is 1.0. Skip the mul to save time
194
+ if torch.ne(do, torch.tensor(1.0, device=do.device)):
195
+ # We use a Triton kernel instead of a PyTorch operation because modifying inputs in-place
196
+ # for gradient storage and backward multiple times causes anomalies with PyTorch but not with Triton.
197
+ N, H = dx.shape
198
+ B = min(MAX_FUSED_SIZE, triton.next_power_of_2(H))
199
+
200
+ elementwise_mul_kernel[(triton.cdiv(N * H, B),)](
201
+ x=dx,
202
+ g=do,
203
+ N=N*H,
204
+ B=B,
205
+ num_warps=32,
206
+ )
207
+
208
+ # handle dw
209
+ if dw is not None:
210
+ V, H = dw.shape
211
+ elementwise_mul_kernel[(triton.cdiv(V * H, B),)](
212
+ x=dw,
213
+ g=do,
214
+ N=V*H,
215
+ B=B,
216
+ num_warps=32,
217
+ )
218
+
219
+ return dx, dw
220
+
221
+
222
+ class FusedKLDivLossFunction(torch.autograd.Function):
223
+
224
+ @staticmethod
225
+ @input_guard
226
+ def forward(
227
+ ctx,
228
+ x: torch.Tensor,
229
+ target_x: torch.Tensor,
230
+ weight: torch.Tensor,
231
+ target_weight: torch.Tensor,
232
+ reduction: str
233
+ ):
234
+ loss, dx, dw = fused_kl_div_forward(
235
+ x=x,
236
+ target_x=target_x,
237
+ weight=weight,
238
+ target_weight=target_weight,
239
+ reduction=reduction
240
+ )
241
+ ctx.save_for_backward(dx, dw)
242
+ return loss
243
+
244
+ @staticmethod
245
+ @input_guard
246
+ def backward(ctx, do):
247
+ dx, dw = ctx.saved_tensors
248
+ dx, dw = fused_kl_div_backward(do, dx, dw)
249
+ return dx, None, dw, None, None
250
+
251
+
252
+ def fused_kl_div_loss(
253
+ x: torch.Tensor,
254
+ target_x: torch.Tensor,
255
+ weight: torch.Tensor,
256
+ target_weight: torch.Tensor,
257
+ reduction: str = 'batchmean'
258
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
259
+ """
260
+ Args:
261
+ x (torch.Tensor): [batch_size * seq_len, hidden_size]
262
+ target_x (torch.Tensor): [batch_size * seq_len, hidden_size]
263
+ weight (torch.Tensor): [vocab_size, hidden_size]
264
+ where `vocab_size` is the number of classes.
265
+ target_weight (torch.Tensor): [vocab_size, hidden_size]
266
+ where `vocab_size` is the number of classes.
267
+ reduction:
268
+ Specifies the reduction to apply to the output: 'batchmean'. Default: 'batchmean'.
269
+ Returns:
270
+ loss
271
+ """
272
+ return FusedKLDivLossFunction.apply(
273
+ x,
274
+ target_x,
275
+ weight,
276
+ target_weight,
277
+ reduction
278
+ )
279
+
280
+
281
+ class FusedKLDivLoss(nn.Module):
282
+
283
+ def __init__(
284
+ self,
285
+ reduction: str = 'batchmean'
286
+ ):
287
+ """
288
+ Args:
289
+ reduction:
290
+ Specifies the reduction to apply to the output: 'batchmean'. Default: 'batchmean'.
291
+ """
292
+ super().__init__()
293
+
294
+ assert reduction in ['batchmean'], f"reduction: {reduction} is not supported"
295
+
296
+ self.reduction = reduction
297
+
298
+ def forward(
299
+ self,
300
+ x: torch.Tensor,
301
+ target_x: torch.Tensor,
302
+ weight: torch.Tensor,
303
+ target_weight: torch.Tensor
304
+ ):
305
+ """
306
+ Args:
307
+ x (torch.Tensor): [batch_size * seq_len, hidden_size]
308
+ target_x (torch.Tensor): [batch_size * seq_len, hidden_size]
309
+ weight (torch.Tensor): [vocab_size, hidden_size]
310
+ where `vocab_size` is the number of classes.
311
+ target_weight (torch.Tensor): [vocab_size, hidden_size]
312
+ where `vocab_size` is the number of classes.
313
+ Returns:
314
+ loss
315
+ """
316
+ loss = fused_kl_div_loss(
317
+ x=x,
318
+ target_x=target_x,
319
+ weight=weight,
320
+ target_weight=target_weight,
321
+ reduction=self.reduction
322
+ )
323
+ return loss
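For orientation, a minimal usage sketch of the fused KL-divergence loss defined above, e.g. for distilling a student LM head against a teacher LM head without materializing both [N, V] logit tensors at once (not part of the diff; shapes are illustrative and a CUDA device with Triton is assumed):

import torch
from fla.modules.fused_kl_div import FusedKLDivLoss

N, H, V = 2048, 1024, 32000  # tokens, hidden size, vocab size
student_hidden = torch.randn(N, H, device='cuda', requires_grad=True)
teacher_hidden = torch.randn(N, H, device='cuda')
student_head = torch.randn(V, H, device='cuda', requires_grad=True)
teacher_head = torch.randn(V, H, device='cuda')

kl = FusedKLDivLoss(reduction='batchmean')
loss = kl(student_hidden, teacher_hidden, student_head, teacher_head)
loss.backward()  # gradients flow to student_hidden and student_head only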
fla/modules/fused_linear_cross_entropy.py ADDED
@@ -0,0 +1,570 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Code adapted from
4
+ # https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/ops/fused_linear_cross_entropy.py
5
+
6
+ from functools import partial
7
+ from typing import Optional, Tuple
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ import triton
13
+ import triton.language as tl
14
+ from torch.distributed import DeviceMesh
15
+ from torch.distributed.tensor import DTensor, Replicate, Shard, distribute_module
16
+ from torch.distributed.tensor.parallel import ParallelStyle
17
+
18
+ from fla.ops.utils import logsumexp_fwd
19
+ from fla.ops.utils.op import exp
20
+ from fla.utils import input_guard
21
+
22
+ # The hard limit of TRITON_MAX_TENSOR_NUMEL is 1048576
23
+ # https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/language/core.py#L19
24
+ # However, setting limit as 65536 as in LayerNorm tutorial is faster because of less register spilling
25
+ # The optimal maximum block size depends on your hardware, your kernel, and your dtype
26
+ MAX_FUSED_SIZE = 65536 // 2
27
+
28
+
29
+ @triton.jit
30
+ def cross_entropy_kernel(
31
+ logits,
32
+ lse,
33
+ target,
34
+ loss,
35
+ total,
36
+ ignore_index,
37
+ label_smoothing: tl.constexpr,
38
+ logit_scale: tl.constexpr,
39
+ reduction: tl.constexpr,
40
+ V: tl.constexpr,
41
+ BV: tl.constexpr
42
+ ):
43
+ """
44
+ This kernel computes both cross entropy loss and the gradient of the input.
45
+ We only consider hard label + mean reduction for now.
46
+ Please refer to https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html for the math.
47
+
48
+ Args:
49
+ logits:
50
+ Pointer to logits tensor.
51
+ lse:
52
+ Pointer to logsumexp tensor.
53
+ target: Pointer to target tensor.
54
+ loss:
55
+ Pointer to tensor to store the loss.
56
+ V (int):
57
+ The number of columns in the input tensor.
58
+ total (int):
59
+ The number of non-ignored classes.
60
+ ignore_index (int):
61
+ The index to ignore in the target.
62
+ label_smoothing (float):
63
+ The amount of smoothing when computing the loss, where 0.0 means no smoothing.
64
+ reduction (str):
65
+ The string for the reduction to apply
66
+ BV (int):
67
+ The block size for vocab.
68
+ """
69
+
70
+ # https://github.com/triton-lang/triton/issues/1058
71
+ # If B*T*V is too large, i_n * stride will overflow out of int32, so we convert to int64
72
+ i_n = tl.program_id(0).to(tl.int64)
73
+ NV = tl.cdiv(V, BV)
74
+
75
+ # 1. Load target first because if the target is ignore_index, we can return right away
76
+ b_y = tl.load(target + i_n)
77
+
78
+ # 2. locate the start index
79
+ logits += i_n * V
80
+
81
+ if b_y == ignore_index:
82
+ # set all x as 0
83
+ for i in range(0, V, BV):
84
+ o_v = i + tl.arange(0, BV)
85
+ tl.store(logits + o_v, 0.0, mask=o_v < V)
86
+ return
87
+
88
+ # Online softmax: 2 loads + 1 store (compared with 3 loads + 1 store for the safe softmax)
89
+ # Refer to Algorithm 3 in the paper: https://arxiv.org/pdf/1805.02867
90
+
91
+ # 3. [Online softmax] first pass: compute logsumexp
92
+ # we did this in another kernel
93
+ b_l = tl.load(logits + b_y) * logit_scale
94
+ b_lse = tl.load(lse + i_n)
95
+
96
+ # 4. Calculate the loss
97
+ # loss = lse - logits_l
98
+ b_loss = b_lse - b_l
99
+
100
+ # Label smoothing is a general case of normal cross entropy
101
+ # See the full derivation at https://github.com/linkedin/Liger-Kernel/pull/198#issue-2503665310
102
+ b_z = 0.0
103
+ eps = label_smoothing / V
104
+
105
+ # We need tl.debug_barrier() as mentioned in
106
+ # https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/ops/cross_entropy.py#L34
107
+ tl.debug_barrier()
108
+
109
+ # 5. [Online Softmax] Second pass: compute gradients
110
+ # For 'mean' reduction, gradients are normalized by number of non-ignored elements
111
+ # dx_y = (softmax(x_y) - 1) / N
112
+ # dx_i = softmax(x_i) / N, i != y
113
+ # For label smoothing:
114
+ # dx_i = (softmax(x_i) - label_smoothing / V) / N, i != y
115
+ # dx_y = (softmax(x_y) - label_smoothing / V - (1 - label_smoothing)) / N
116
+ # = dx_i - (1 - label_smoothing) / N
117
+ for iv in range(0, NV):
118
+ o_v = iv * BV + tl.arange(0, BV)
119
+ b_logits = tl.load(logits + o_v, mask=o_v < V, other=float('-inf')) * logit_scale
120
+ if label_smoothing > 0:
121
+ # scale X beforehand to avoid overflow
122
+ b_z += tl.sum(tl.where(o_v < V, -eps * b_logits, 0.0))
123
+ b_p = (exp(b_logits - b_lse) - eps) * logit_scale
124
+ if reduction == "mean":
125
+ b_p = b_p / total
126
+ tl.store(logits + o_v, b_p, mask=o_v < V)
127
+
128
+ tl.debug_barrier()
129
+
130
+ # Original loss = H(q, p), with label smoothing regularization = H(q', p) and (label_smoothing / V) = eps
131
+ # H(q', p) = (1 - label_smoothing) * H(q, p) + label_smoothing * H(u, p)
132
+ # = (1 - label_smoothing) * H(q, p) + eps * sum(logsoftmax(x_i))
133
+ # By using m (global max of xi) and d (sum of e^(xi-m)), we can simplify as:
134
+ # = (1 - label_smoothing) * H(q, p) + (-sum(x_i * eps) + label_smoothing * (m + logd))
135
+ # Refer to H(q', p) in section 7 of the paper:
136
+ # https://arxiv.org/pdf/1512.00567
137
+ # pytorch:
138
+ # https://github.com/pytorch/pytorch/blob/2981534f54d49fa3a9755c9b0855e7929c2527f0/aten/src/ATen/native/LossNLL.cpp#L516
139
+ # See full derivation at https://github.com/linkedin/Liger-Kernel/pull/198#issuecomment-2333753087
140
+ if label_smoothing > 0:
141
+ b_loss = b_loss * (1 - label_smoothing) + (b_z + label_smoothing * b_lse)
142
+
143
+ # 6. Specially handle the i==y case where `dx_y = (softmax(x_y) - (1 - label_smoothing)) / N`
144
+ b_l = tl.load(logits + b_y)
145
+
146
+ # Normalize the loss by the number of non-ignored elements if reduction is "mean"
147
+ if reduction == 'mean':
148
+ b_loss = b_loss / total
149
+ b_l += (label_smoothing - 1) / total * logit_scale
150
+ else:
151
+ b_l += (label_smoothing - 1) * logit_scale
152
+
153
+ tl.store(loss + i_n, b_loss)
154
+ tl.store(logits + b_y, b_l)
155
+
156
+
157
+ @triton.jit
158
+ def elementwise_mul_kernel(
159
+ x,
160
+ g,
161
+ N: tl.constexpr,
162
+ B: tl.constexpr
163
+ ):
164
+ """
165
+ This function multiplies each element of the tensor pointed by x with the value pointed by g.
166
+ The multiplication is performed in-place on the tensor pointed by x.
167
+
168
+ Parameters:
169
+ x:
170
+ Pointer to the input tensor.
171
+ g:
172
+ Pointer to the gradient output value.
173
+ N (int):
174
+ The total number of elements in the input tensor.
175
+ B (int):
176
+ The block size for Triton operations.
177
+ """
178
+
179
+ # Get the program ID and convert it to int64 to avoid overflow
180
+ i_x = tl.program_id(0).to(tl.int64)
181
+ o_x = i_x * B + tl.arange(0, B)
182
+
183
+ # Load the gradient output value
184
+ b_g = tl.load(g)
185
+ b_x = tl.load(x + o_x, mask=o_x < N)
186
+ tl.store(x + o_x, b_x * b_g, mask=o_x < N)
187
+
188
+
189
+ def fused_linear_cross_entropy_forward(
190
+ x: torch.Tensor,
191
+ target: torch.LongTensor,
192
+ weight: torch.Tensor,
193
+ bias: torch.Tensor = None,
194
+ ignore_index: int = -100,
195
+ label_smoothing: float = 0.0,
196
+ logit_scale: float = 1.0,
197
+ num_chunks: int = 8,
198
+ reduction: str = "mean"
199
+ ):
200
+ device = x.device
201
+ # inputs have shape: [N, H]
202
+ # materialized activations will have shape: [N, V]
203
+ # the increase in memory = [N, V]
204
+ # reduction can be achieved by partitioning the number of tokens N into smaller chunks.
205
+
206
+ # ideally, we would like to achieve the same memory consumption as [N, H],
207
+ # so the expected chunk size should be:
208
+ # NC = ceil(V / H)
209
+ # C = ceil(N / NC)
210
+ # for ex: N = 4096*4, V = 32000, H = 4096 ==> NC = 8, C = ceil(N / NC) = 2048
211
+ N, H, V = *x.shape, weight.shape[0]
212
+ BV = min(MAX_FUSED_SIZE, triton.next_power_of_2(V))
213
+ # TODO: in real cases, we may need to limit the number of chunks NC to
214
+ # ensure the precisions of accumulated gradients
215
+ NC = min(num_chunks, triton.cdiv(V, H))
216
+ C = triton.next_power_of_2(triton.cdiv(N, NC))
217
+ NC = triton.cdiv(N, C)
218
+
219
+ # [N, H]
220
+ dx = torch.zeros_like(x, device=device)
221
+ # [V, H]
222
+ dw = torch.zeros_like(weight, device=device, dtype=torch.float) if weight is not None else None
223
+ # [V]
224
+ db = torch.zeros_like(bias, device=device, dtype=torch.float) if bias is not None else None
225
+ # [N]
226
+ loss = torch.zeros(N, device=device, dtype=torch.float)
227
+
228
+ total = target.ne(ignore_index).sum().item()
229
+
230
+ for ic in range(NC):
231
+ start, end = ic * C, min((ic + 1) * C, N)
232
+ # [C, H]
233
+ c_x = x[start:end]
234
+ # when doing matmul, use the original precision
235
+ # [C, V]
236
+ c_logits = F.linear(c_x, weight, bias)
237
+ c_target = target[start:end]
238
+ # [C]
239
+ # keep lse in fp32 to maintain precision
240
+ c_lse = logsumexp_fwd(c_logits, scale=logit_scale, dtype=torch.float)
241
+
242
+ # unreduced loss
243
+ c_loss = loss[start:end]
244
+
245
+ # Here we calculate the gradient of c_logits in place so we can save memory.
246
+ cross_entropy_kernel[(c_logits.shape[0],)](
247
+ logits=c_logits,
248
+ lse=c_lse,
249
+ target=c_target,
250
+ loss=c_loss,
251
+ total=total,
252
+ ignore_index=ignore_index,
253
+ label_smoothing=label_smoothing,
254
+ logit_scale=logit_scale,
255
+ reduction=reduction,
256
+ V=V,
257
+ BV=BV,
258
+ num_warps=32
259
+ )
260
+
261
+ # gradient of logits is computed in-place by the above triton kernel and is of shape: C x V
262
+ # thus dx should be of shape: C x H
263
+ dx[start:end] = torch.mm(c_logits, weight)
264
+
265
+ # keep dw in fp32 to maintain precision
266
+ if weight is not None:
267
+ dw += c_logits.t() @ c_x
268
+
269
+ if bias is not None:
270
+ torch.add(input=db, other=c_logits.sum(0), out=db)
271
+
272
+ loss = loss.sum()
273
+ if dw is not None:
274
+ dw = dw.to(weight)
275
+ if db is not None:
276
+ db = db.to(bias)
277
+ return loss, dx, dw, db
278
+
279
+
280
+ def fused_linear_cross_entropy_backward(
281
+ do: torch.Tensor,
282
+ dx: torch.Tensor,
283
+ dw: torch.Tensor,
284
+ db: torch.Tensor
285
+ ):
286
+ # If cross entropy is the last layer, do is 1.0. Skip the mul to save time
287
+ if torch.ne(do, torch.tensor(1.0, device=do.device)):
288
+ # We use a Triton kernel instead of a PyTorch operation because modifying inputs in-place
289
+ # for gradient storage and backward multiple times causes anomalies with PyTorch but not with Triton.
290
+ N, H = dx.shape
291
+ B = min(MAX_FUSED_SIZE, triton.next_power_of_2(H))
292
+
293
+ elementwise_mul_kernel[(triton.cdiv(N * H, B),)](
294
+ x=dx,
295
+ g=do,
296
+ N=N*H,
297
+ B=B,
298
+ num_warps=32,
299
+ )
300
+
301
+ # handle dw
302
+ if dw is not None:
303
+ V, H = dw.shape
304
+ elementwise_mul_kernel[(triton.cdiv(V * H, B),)](
305
+ x=dw,
306
+ g=do,
307
+ N=V*H,
308
+ B=B,
309
+ num_warps=32,
310
+ )
311
+
312
+ if db is not None:
313
+ V = db.shape[0]
314
+ elementwise_mul_kernel[(triton.cdiv(V, B),)](
315
+ x=db,
316
+ g=do,
317
+ N=V,
318
+ B=B,
319
+ num_warps=32,
320
+ )
321
+ return dx, dw, db
322
+
323
+
324
+ class FusedLinearCrossEntropyFunction(torch.autograd.Function):
325
+
326
+ @staticmethod
327
+ @input_guard
328
+ def forward(
329
+ ctx,
330
+ x: torch.Tensor,
331
+ target: torch.LongTensor,
332
+ weight: torch.Tensor,
333
+ bias: torch.Tensor = None,
334
+ ignore_index: int = -100,
335
+ label_smoothing: float = 0.0,
336
+ logit_scale: float = 1.0,
337
+ num_chunks: int = 8,
338
+ reduction: str = "mean"
339
+ ):
340
+ """
341
+ Fusing the last linear layer with cross-entropy loss
342
+ Reference: https://github.com/mgmalek/efficient_cross_entropy
343
+
344
+ Handle the forward and backward pass of the final linear layer via cross-entropy loss by avoiding
345
+ the materialization of the large logits tensor. Since Cross Entropy Loss is the last layer, we can
346
+ compute the gradient at the forward pass. By doing so, we don't have to store the x and target
347
+ for the backward pass.
348
+
349
+ x (torch.Tensor): [batch_size * seq_len, hidden_size]
350
+ target (torch.LongTensor): [batch_size * seq_len]
351
+ where each value is in [0, vocab_size).
352
+ weight (torch.Tensor): [vocab_size, hidden_size]
353
+ where `vocab_size` is the number of classes.
354
+ bias (Optional[torch.Tensor]): [vocab_size]
355
+ where `vocab_size` is the number of classes.
356
+ ignore_index:
357
+ the index to ignore in the target.
358
+ label_smoothing:
359
+ the amount of smoothing when computing the loss, where 0.0 means no smoothing.
360
+ logit_scale: float = 1.0,
361
+ A scaling factor applied to the logits. Default: 1.0
362
+ num_chunks: int
363
+ The number of chunks to split the input tensor into for processing.
364
+ This can help optimize memory usage and computation speed.
365
+ Default: 8
366
+ reduction:
367
+ Specifies the reduction to apply to the output: 'mean' | 'sum'.
368
+ 'mean': the weighted mean of the output is taken,
369
+ 'sum': the output will be summed.
370
+ Default: 'mean'.
371
+ """
372
+ loss, dx, dw, db = fused_linear_cross_entropy_forward(
373
+ x,
374
+ target,
375
+ weight,
376
+ bias,
377
+ ignore_index,
378
+ label_smoothing,
379
+ logit_scale,
380
+ num_chunks,
381
+ reduction
382
+ )
383
+ # downcast to dtype and store for backward
384
+ ctx.save_for_backward(
385
+ dx.detach(),
386
+ dw.detach() if weight is not None else None,
387
+ db.detach() if bias is not None else None,
388
+ )
389
+ return loss
390
+
391
+ @staticmethod
392
+ @input_guard
393
+ def backward(ctx, do):
394
+ dx, dw, db = ctx.saved_tensors
395
+ dx, dw, db = fused_linear_cross_entropy_backward(do, dx, dw, db)
396
+ return dx, None, dw, db, None, None, None, None, None
397
+
398
+
399
+ def fused_linear_cross_entropy_loss(
400
+ x: torch.Tensor,
401
+ target: torch.LongTensor,
402
+ weight: torch.Tensor,
403
+ bias: torch.Tensor = None,
404
+ ignore_index: int = -100,
405
+ label_smoothing: float = 0.0,
406
+ logit_scale: float = 1.0,
407
+ num_chunks: int = 8,
408
+ reduction: str = "mean"
409
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
410
+ """
411
+ Args:
412
+ x (torch.Tensor): [batch_size * seq_len, hidden_size]
413
+ target (torch.LongTensor): [batch_size * seq_len]
414
+ where each value is in [0, vocab_size).
415
+ weight (torch.Tensor): [vocab_size, hidden_size]
416
+ where `vocab_size` is the number of classes.
417
+ bias (Optional[torch.Tensor]): [vocab_size]
418
+ where `vocab_size` is the number of classes.
419
+ ignore_index: int.
420
+ If target == ignore_index, the loss is set to 0.0.
421
+ label_smoothing: float
422
+ logit_scale: float
423
+ A scaling factor applied to the logits. Default: 1.0
424
+ num_chunks: int
425
+ The number of chunks to split the input tensor into for processing.
426
+ This can help optimize memory usage and computation speed.
427
+ Default: 8
428
+ reduction:
429
+ Specifies the reduction to apply to the output: 'mean' | 'sum'.
430
+ 'mean': the weighted mean of the output is taken,
431
+ 'sum': the output will be summed.
432
+ Default: 'mean'.
433
+ Returns:
434
+ losses: [batch,], float
435
+ """
436
+ return FusedLinearCrossEntropyFunction.apply(
437
+ x,
438
+ target,
439
+ weight,
440
+ bias,
441
+ ignore_index,
442
+ label_smoothing,
443
+ logit_scale,
444
+ num_chunks,
445
+ reduction
446
+ )
447
+
448
+
449
+ class FusedLinearCrossEntropyLoss(nn.Module):
450
+
451
+ def __init__(
452
+ self,
453
+ ignore_index: int = -100,
454
+ label_smoothing: float = 0.0,
455
+ logit_scale: float = 1.0,
456
+ num_chunks: int = 8,
457
+ reduction: str = "mean"
458
+ ):
459
+ """
460
+ Args:
461
+ ignore_index: int.
462
+ If target == ignore_index, the loss is set to 0.0.
463
+ label_smoothing: float
464
+ logit_scale: float
465
+ A scaling factor applied to the logits. Default: 1.0
466
+ num_chunks: int
467
+ The number of chunks to split the input tensor into for processing.
468
+ This can help optimize memory usage and computation speed.
469
+ Default: 8
470
+ reduction:
471
+ Specifies the reduction to apply to the output: 'mean' | 'sum'.
472
+ 'mean': the weighted mean of the output is taken,
473
+ 'sum': the output will be summed.
474
+ Default: 'mean'.
475
+ """
476
+ super().__init__()
477
+
478
+ assert reduction in ["mean", "sum"], f"reduction: {reduction} is not supported"
479
+
480
+ self.ignore_index = ignore_index
481
+ self.label_smoothing = label_smoothing
482
+ self.logit_scale = logit_scale
483
+ self.num_chunks = num_chunks
484
+ self.reduction = reduction
485
+
486
+ @torch.compiler.disable
487
+ def forward(
488
+ self,
489
+ x: torch.Tensor,
490
+ target: torch.LongTensor,
491
+ weight: torch.Tensor,
492
+ bias: Optional[torch.Tensor] = None
493
+ ):
494
+ """
495
+ Args:
496
+ x (torch.Tensor): [batch_size, seq_len, hidden_size]
497
+ target (torch.LongTensor): [batch_size, seq_len]
498
+ where each value is in [0, V).
499
+ weight (torch.Tensor): [vocab_size, hidden_size]
500
+ where `vocab_size` is the number of classes.
501
+ bias (Optional[torch.Tensor]): [vocab_size]
502
+ where `vocab_size` is the number of classes.
503
+ Returns:
504
+ loss
505
+ """
506
+ loss = fused_linear_cross_entropy_loss(
507
+ x.view(-1, x.shape[-1]),
508
+ target.view(-1),
509
+ weight=weight,
510
+ bias=bias,
511
+ ignore_index=self.ignore_index,
512
+ label_smoothing=self.label_smoothing,
513
+ logit_scale=self.logit_scale,
514
+ num_chunks=self.num_chunks,
515
+ reduction=self.reduction
516
+ )
517
+ return loss
518
+
519
+
520
+ class LinearLossParallel(ParallelStyle):
521
+ def __init__(
522
+ self,
523
+ *,
524
+ sequence_dim: int = 1,
525
+ use_local_output: bool = False,
526
+ ):
527
+ super().__init__()
528
+
529
+ self.sequence_sharding = (Shard(sequence_dim),)
530
+ self.use_local_output = use_local_output
531
+
532
+ @staticmethod
533
+ def _prepare_input_fn(sequence_sharding, mod, inputs, device_mesh):
534
+ x, target, weight, bias = inputs
535
+
536
+ if not isinstance(x, DTensor):
537
+ # assume the input passed in already sharded on the sequence dim and create the DTensor
538
+ x = DTensor.from_local(x, device_mesh, sequence_sharding)
539
+ if x.placements != sequence_sharding:
540
+ x = x.redistribute(placements=sequence_sharding, async_op=True)
541
+ if not isinstance(target, DTensor):
542
+ target = DTensor.from_local(target, device_mesh, [Replicate()])
543
+ if target.placements != sequence_sharding:
544
+ target = target.redistribute(placements=sequence_sharding, async_op=True)
545
+
546
+ if not isinstance(weight, DTensor):
547
+ weight = DTensor.from_local(weight, device_mesh, [Replicate()])
548
+ if weight.placements != [Replicate()]:
549
+ # we replicate the weight/bias in FLCE
550
+ weight = weight.redistribute(placements=[Replicate()], async_op=True)
551
+
552
+ if bias is not None and not isinstance(bias, DTensor):
553
+ bias = DTensor.from_local(bias, device_mesh, [Replicate()])
554
+ if bias is not None and bias.placements != [Replicate()]:
555
+ bias = bias.redistribute(placements=[Replicate()], async_op=True)
556
+
557
+ return x.to_local(), target.to_local(), weight.to_local(), bias.to_local() if bias is not None else bias
558
+
559
+ @staticmethod
560
+ def _prepare_output_fn(use_local_output, mod, outputs, device_mesh):
561
+ return outputs.to_local() if use_local_output else outputs
562
+
563
+ def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
564
+ return distribute_module(
565
+ module,
566
+ device_mesh,
567
+ partition_fn=None,
568
+ input_fn=partial(self._prepare_input_fn, self.sequence_sharding),
569
+ output_fn=partial(self._prepare_output_fn, self.use_local_output)
570
+ )
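For orientation, a minimal usage sketch of the fused linear + cross-entropy loss defined above, which avoids materializing the full [batch_size * seq_len, vocab_size] logits tensor (not part of the diff; shapes are illustrative and a CUDA device with Triton is assumed; LinearLossParallel is only needed for sequence-parallel training with DTensor):

import torch
from fla.modules.fused_linear_cross_entropy import FusedLinearCrossEntropyLoss

B, T, H, V = 2, 1024, 2048, 32000
hidden = torch.randn(B, T, H, device='cuda', requires_grad=True)
labels = torch.randint(0, V, (B, T), device='cuda')
lm_head_weight = torch.randn(V, H, device='cuda', requires_grad=True)

criterion = FusedLinearCrossEntropyLoss(num_chunks=8, reduction='mean')
loss = criterion(hidden, labels, lm_head_weight)  # bias is optional
loss.backward()  # gradients are produced for hidden and lm_head_weight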
fla/modules/fused_norm_gate.py ADDED
@@ -0,0 +1,995 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
3
+
4
+ from __future__ import annotations
5
+
6
+ import math
7
+ from typing import Optional
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ import triton
13
+ import triton.language as tl
14
+
15
+ from fla.utils import get_multiprocessor_count, input_guard
16
+
17
+
18
+ @triton.autotune(
19
+ configs=[
20
+ triton.Config({}, num_warps=num_warps, num_stages=num_stages)
21
+ for num_warps in [1, 2, 4, 8, 16, 32]
22
+ for num_stages in [2, 3, 4]
23
+ ],
24
+ key=['N', 'HAS_RESIDUAL', 'STORE_RESIDUAL_OUT', 'IS_RMS_NORM', 'HAS_BIAS'],
25
+ )
26
+ @triton.jit
27
+ def layer_norm_gated_fwd_kernel(
28
+ X, # pointer to the input
29
+ G, # pointer to the gate
30
+ Y, # pointer to the output
31
+ W, # pointer to the weights
32
+ B, # pointer to the biases
33
+ RESIDUAL, # pointer to the residual
34
+ RESIDUAL_OUT, # pointer to the residual
35
+ Mean, # pointer to the mean
36
+ Rstd, # pointer to the 1/std
37
+ N, # number of columns in X
38
+ eps, # epsilon to avoid division by zero
39
+ ACTIVATION: tl.constexpr,
40
+ IS_RMS_NORM: tl.constexpr,
41
+ BLOCK_N: tl.constexpr,
42
+ HAS_RESIDUAL: tl.constexpr,
43
+ STORE_RESIDUAL_OUT: tl.constexpr,
44
+ HAS_WEIGHT: tl.constexpr,
45
+ HAS_BIAS: tl.constexpr
46
+ ):
47
+ # Map the program id to the row of X and Y it should compute.
48
+ row = tl.program_id(0)
49
+ X += row * N
50
+ Y += row * N
51
+ G += row * N
52
+ if HAS_RESIDUAL:
53
+ RESIDUAL += row * N
54
+ if STORE_RESIDUAL_OUT:
55
+ RESIDUAL_OUT += row * N
56
+ # Compute mean and variance
57
+ cols = tl.arange(0, BLOCK_N)
58
+ x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
59
+ if HAS_RESIDUAL:
60
+ residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32)
61
+ x += residual
62
+ if STORE_RESIDUAL_OUT:
63
+ tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
64
+ if not IS_RMS_NORM:
65
+ mean = tl.sum(x, axis=0) / N
66
+ tl.store(Mean + row, mean)
67
+ xbar = tl.where(cols < N, x - mean, 0.0)
68
+ var = tl.sum(xbar * xbar, axis=0) / N
69
+ else:
70
+ xbar = tl.where(cols < N, x, 0.0)
71
+ var = tl.sum(xbar * xbar, axis=0) / N
72
+ rstd = 1 / tl.sqrt(var + eps)
73
+ tl.store(Rstd + row, rstd)
74
+ # Normalize and apply linear transformation
75
+ mask = cols < N
76
+ if HAS_WEIGHT:
77
+ w = tl.load(W + cols, mask=mask).to(tl.float32)
78
+ if HAS_BIAS:
79
+ b = tl.load(B + cols, mask=mask).to(tl.float32)
80
+ x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
81
+ y = x_hat * w if HAS_WEIGHT else x_hat
82
+ if HAS_BIAS:
83
+ y = y + b
84
+
85
+ # Swish output gate
86
+ g = tl.load(G + cols, mask=cols < N, other=0.0).to(tl.float32)
87
+ if ACTIVATION == 'swish':
88
+ y = y * g * tl.sigmoid(g)
89
+ elif ACTIVATION == 'silu':
90
+ y = y * g * tl.sigmoid(g)
91
+ elif ACTIVATION == 'sigmoid':
92
+ y = y * tl.sigmoid(g)
93
+
94
+ # Write output
95
+ tl.store(Y + cols, y, mask=mask)
96
+
97
+
98
+ def layer_norm_gated_fwd(
99
+ x: torch.Tensor,
100
+ g: torch.Tensor,
101
+ weight: torch.Tensor,
102
+ bias: torch.Tensor,
103
+ activation: str = 'swish',
104
+ eps: float = 1e-5,
105
+ residual: torch.Tensor = None,
106
+ out_dtype: torch.dtype = None,
107
+ residual_dtype: torch.dtype = None,
108
+ is_rms_norm: bool = False
109
+ ):
110
+ if residual is not None:
111
+ residual_dtype = residual.dtype
112
+ M, N = x.shape
113
+ if residual is not None:
114
+ assert residual.shape == (M, N)
115
+ if weight is not None:
116
+ assert weight.shape == (N,)
117
+ if bias is not None:
118
+ assert bias.shape == (N,)
119
+ # allocate output
120
+ y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype)
121
+ if residual is not None or (residual_dtype is not None and residual_dtype != x.dtype):
122
+ residual_out = torch.empty(M, N, device=x.device, dtype=residual_dtype)
123
+ else:
124
+ residual_out = None
125
+ mean = torch.empty((M,), dtype=torch.float, device=x.device) if not is_rms_norm else None
126
+ rstd = torch.empty((M,), dtype=torch.float, device=x.device)
127
+ # Less than 64KB per feature: enqueue fused kernel
128
+ MAX_FUSED_SIZE = 65536 // x.element_size()
129
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
130
+ if N > BLOCK_N:
131
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
132
+ # heuristics for number of warps
133
+
134
+ layer_norm_gated_fwd_kernel[(M,)](
135
+ x,
136
+ g,
137
+ y,
138
+ weight,
139
+ bias,
140
+ residual,
141
+ residual_out,
142
+ mean,
143
+ rstd,
144
+ N,
145
+ eps,
146
+ ACTIVATION=activation,
147
+ IS_RMS_NORM=is_rms_norm,
148
+ BLOCK_N=BLOCK_N,
149
+ HAS_RESIDUAL=residual is not None,
150
+ STORE_RESIDUAL_OUT=residual_out is not None,
151
+ HAS_WEIGHT=weight is not None,
152
+ HAS_BIAS=bias is not None,
153
+ )
154
+ # residual_out is None if residual is None and residual_dtype == input_dtype
155
+ return y, mean, rstd, residual_out if residual_out is not None else x
156
+
157
+
158
+ @triton.heuristics({
159
+ 'RECOMPUTE_OUTPUT': lambda args: args["Y"] is not None
160
+ })
161
+ @triton.autotune(
162
+ configs=[
163
+ triton.Config({}, num_warps=num_warps, num_stages=num_stages)
164
+ for num_warps in [1, 2, 4, 8, 16, 32]
165
+ for num_stages in [2, 3, 4]
166
+ ],
167
+ key=['N', 'HAS_DRESIDUAL', 'STORE_DRESIDUAL', 'IS_RMS_NORM', 'HAS_BIAS'],
168
+ )
169
+ @triton.jit
170
+ def layer_norm_gated_bwd_kernel(
171
+ X, # pointer to the input
172
+ G, # pointer to the gate
173
+ W, # pointer to the weights
174
+ B, # pointer to the biases
175
+ Y, # pointer to the output to be recomputed
176
+ DY, # pointer to the output gradient
177
+ DX, # pointer to the input gradient
178
+ DG, # pointer to the gate gradient
179
+ DW, # pointer to the partial sum of weights gradient
180
+ DB, # pointer to the partial sum of biases gradient
181
+ DRESIDUAL,
182
+ DRESIDUAL_IN,
183
+ Mean, # pointer to the mean
184
+ Rstd, # pointer to the 1/std
185
+ M, # number of rows in X
186
+ N, # number of columns in X
187
+ eps, # epsilon to avoid division by zero
188
+ rows_per_program,
189
+ ACTIVATION: tl.constexpr,
190
+ IS_RMS_NORM: tl.constexpr,
191
+ BLOCK_N: tl.constexpr,
192
+ HAS_DRESIDUAL: tl.constexpr,
193
+ STORE_DRESIDUAL: tl.constexpr,
194
+ HAS_WEIGHT: tl.constexpr,
195
+ HAS_BIAS: tl.constexpr,
196
+ RECOMPUTE_OUTPUT: tl.constexpr,
197
+ ):
198
+ # Map the program id to the elements of X, DX, and DY it should compute.
199
+ row_block_id = tl.program_id(0)
200
+ row_start = row_block_id * rows_per_program
201
+ cols = tl.arange(0, BLOCK_N)
202
+ mask = cols < N
203
+ X += row_start * N
204
+ G += row_start * N
205
+ if HAS_DRESIDUAL:
206
+ DRESIDUAL += row_start * N
207
+ if STORE_DRESIDUAL:
208
+ DRESIDUAL_IN += row_start * N
209
+ DY += row_start * N
210
+ DX += row_start * N
211
+ DG += row_start * N
212
+ if RECOMPUTE_OUTPUT:
213
+ Y += row_start * N
214
+ if HAS_WEIGHT:
215
+ w = tl.load(W + cols, mask=mask).to(tl.float32)
216
+ dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
217
+ if HAS_BIAS:
218
+ b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
219
+ if HAS_BIAS:
220
+ db = tl.zeros((BLOCK_N,), dtype=tl.float32)
221
+
222
+ row_end = min((row_block_id + 1) * rows_per_program, M)
223
+ for row in range(row_start, row_end):
224
+ # Load data to SRAM
225
+ x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
226
+ g = tl.load(G + cols, mask=mask, other=0).to(tl.float32)
227
+ dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
228
+
229
+ if not IS_RMS_NORM:
230
+ mean = tl.load(Mean + row)
231
+ rstd = tl.load(Rstd + row)
232
+ # Compute dx
233
+ xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
234
+ xhat = tl.where(mask, xhat, 0.0)
235
+
236
+ y = xhat * w if HAS_WEIGHT else xhat
237
+ if HAS_BIAS:
238
+ y = y + b
239
+ if RECOMPUTE_OUTPUT:
240
+ tl.store(Y + cols, y, mask=mask)
241
+
242
+ sigmoid_g = tl.sigmoid(g)
243
+ if ACTIVATION == 'swish':
244
+ dg = dy * y * (sigmoid_g + g * sigmoid_g * (1 - sigmoid_g))
245
+ dy = dy * g * sigmoid_g
246
+ elif ACTIVATION == 'silu':
247
+ dg = dy * y * (sigmoid_g + g * sigmoid_g * (1 - sigmoid_g))
248
+ dy = dy * g * sigmoid_g
249
+ elif ACTIVATION == 'sigmoid':
250
+ dg = dy * y * sigmoid_g * (1 - sigmoid_g)
251
+ dy = dy * sigmoid_g
252
+ wdy = dy
253
+ if HAS_WEIGHT:
254
+ wdy = dy * w
255
+ dw += dy * xhat
256
+ if HAS_BIAS:
257
+ db += dy
258
+ if not IS_RMS_NORM:
259
+ c1 = tl.sum(xhat * wdy, axis=0) / N
260
+ c2 = tl.sum(wdy, axis=0) / N
261
+ dx = (wdy - (xhat * c1 + c2)) * rstd
262
+ else:
263
+ c1 = tl.sum(xhat * wdy, axis=0) / N
264
+ dx = (wdy - xhat * c1) * rstd
265
+ if HAS_DRESIDUAL:
266
+ dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32)
267
+ dx += dres
268
+ # Write dx
269
+ if STORE_DRESIDUAL:
270
+ tl.store(DRESIDUAL_IN + cols, dx, mask=mask)
271
+ tl.store(DX + cols, dx, mask=mask)
272
+ tl.store(DG + cols, dg, mask=mask)
273
+
274
+ X += N
275
+ G += N
276
+ if HAS_DRESIDUAL:
277
+ DRESIDUAL += N
278
+ if STORE_DRESIDUAL:
279
+ DRESIDUAL_IN += N
280
+ if RECOMPUTE_OUTPUT:
281
+ Y += N
282
+ DY += N
283
+ DX += N
284
+ DG += N
285
+ if HAS_WEIGHT:
286
+ tl.store(DW + row_block_id * N + cols, dw, mask=mask)
287
+ if HAS_BIAS:
288
+ tl.store(DB + row_block_id * N + cols, db, mask=mask)
289
+
290
+
291
+ def layer_norm_gated_bwd(
292
+ dy: torch.Tensor,
293
+ x: torch.Tensor,
294
+ g: torch.Tensor,
295
+ weight: torch.Tensor,
296
+ bias: torch.Tensor,
297
+ activation: str = 'swish',
298
+ eps: float = 1e-5,
299
+ mean: torch.Tensor = None,
300
+ rstd: torch.Tensor = None,
301
+ dresidual: torch.Tensor = None,
302
+ has_residual: bool = False,
303
+ is_rms_norm: bool = False,
304
+ x_dtype: torch.dtype = None,
305
+ recompute_output: bool = False,
306
+ ):
307
+ M, N = x.shape
308
+ assert dy.shape == (M, N)
309
+ if dresidual is not None:
310
+ assert dresidual.shape == (M, N)
311
+ if weight is not None:
312
+ assert weight.shape == (N,)
313
+ if bias is not None:
314
+ assert bias.shape == (N,)
315
+ # allocate output
316
+ dx = torch.empty_like(x) if x_dtype is None else torch.empty(M, N, dtype=x_dtype, device=x.device)
317
+ dg = torch.empty_like(g) if x_dtype is None else torch.empty(M, N, dtype=x_dtype, device=x.device)
318
+ dresidual_in = torch.empty_like(x) if has_residual and dx.dtype != x.dtype else None
319
+ y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None
320
+
321
+ # Less than 64KB per feature: enqueue fused kernel
322
+ MAX_FUSED_SIZE = 65536 // x.element_size()
323
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
324
+ if N > BLOCK_N:
325
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
326
+ sm_count = get_multiprocessor_count(x.device.index)
327
+ dw = torch.empty((sm_count, N), dtype=torch.float, device=weight.device) if weight is not None else None
328
+ db = torch.empty((sm_count, N), dtype=torch.float, device=bias.device) if bias is not None else None
329
+ rows_per_program = math.ceil(M / sm_count)
330
+ grid = (sm_count,)
331
+ layer_norm_gated_bwd_kernel[grid](
332
+ x,
333
+ g,
334
+ weight,
335
+ bias,
336
+ y,
337
+ dy,
338
+ dx,
339
+ dg,
340
+ dw,
341
+ db,
342
+ dresidual,
343
+ dresidual_in,
344
+ mean,
345
+ rstd,
346
+ M,
347
+ N,
348
+ eps,
349
+ rows_per_program,
350
+ ACTIVATION=activation,
351
+ IS_RMS_NORM=is_rms_norm,
352
+ BLOCK_N=BLOCK_N,
353
+ HAS_DRESIDUAL=dresidual is not None,
354
+ STORE_DRESIDUAL=dresidual_in is not None,
355
+ HAS_WEIGHT=weight is not None,
356
+ HAS_BIAS=bias is not None,
357
+ )
358
+ dw = dw.sum(0).to(weight.dtype) if weight is not None else None
359
+ db = db.sum(0).to(bias.dtype) if bias is not None else None
360
+ # Don't need to compute dresidual_in separately in this case
361
+ if has_residual and dx.dtype == x.dtype:
362
+ dresidual_in = dx
363
+ return (dx, dg, dw, db, dresidual_in) if not recompute_output else (dx, dg, dw, db, dresidual_in, y)
364
+
365
+
366
+ class LayerNormGatedFunction(torch.autograd.Function):
367
+
368
+ @staticmethod
369
+ @input_guard
370
+ def forward(
371
+ ctx,
372
+ x: torch.Tensor,
373
+ g: torch.Tensor,
374
+ weight: torch.Tensor,
375
+ bias: torch.Tensor,
376
+ activation: str,
377
+ residual: Optional[torch.Tensor] = None,
378
+ eps: float = 1e-6,
379
+ prenorm: bool = False,
380
+ residual_in_fp32: bool = False,
381
+ is_rms_norm: bool = False,
382
+ ):
383
+ x_shape_og = x.shape
384
+ g_shape_og = g.shape
385
+ # reshape input data into 2D tensor
386
+ x = x.reshape(-1, x.shape[-1])
387
+ g = g.reshape(-1, g.shape[-1])
388
+ if residual is not None:
389
+ assert residual.shape == x_shape_og
390
+ residual = residual.reshape(-1, residual.shape[-1])
391
+ residual_dtype = (
392
+ residual.dtype
393
+ if residual is not None
394
+ else (torch.float if residual_in_fp32 else None)
395
+ )
396
+ y, mean, rstd, residual_out = layer_norm_gated_fwd(
397
+ x=x,
398
+ g=g,
399
+ weight=weight,
400
+ bias=bias,
401
+ activation=activation,
402
+ eps=eps,
403
+ residual=residual,
404
+ residual_dtype=residual_dtype,
405
+ is_rms_norm=is_rms_norm
406
+ )
407
+ ctx.save_for_backward(residual_out, g, weight, bias, mean, rstd)
408
+ ctx.x_shape_og = x_shape_og
409
+ ctx.g_shape_og = g_shape_og
410
+ ctx.activation = activation
411
+ ctx.eps = eps
412
+ ctx.is_rms_norm = is_rms_norm
413
+ ctx.has_residual = residual is not None
414
+ ctx.prenorm = prenorm
415
+ ctx.x_dtype = x.dtype
416
+ y = y.reshape(x_shape_og)
417
+ return y if not prenorm else (y, residual_out.reshape(x_shape_og))
418
+
419
+ @staticmethod
420
+ @input_guard
421
+ def backward(ctx, dy, *args):
422
+ x, g, weight, bias, mean, rstd = ctx.saved_tensors
423
+ dy = dy.reshape(-1, dy.shape[-1])
424
+ assert dy.shape == x.shape
425
+ if ctx.prenorm:
426
+ dresidual = args[0]
427
+ dresidual = dresidual.reshape(-1, dresidual.shape[-1])
428
+ assert dresidual.shape == x.shape
429
+ else:
430
+ dresidual = None
431
+ dx, dg, dw, db, dresidual_in = layer_norm_gated_bwd(
432
+ dy=dy,
433
+ x=x,
434
+ g=g,
435
+ weight=weight,
436
+ bias=bias,
437
+ activation=ctx.activation,
438
+ eps=ctx.eps,
439
+ mean=mean,
440
+ rstd=rstd,
441
+ dresidual=dresidual,
442
+ has_residual=ctx.has_residual,
443
+ is_rms_norm=ctx.is_rms_norm,
444
+ x_dtype=ctx.x_dtype,
445
+ )
446
+ return (
447
+ dx.reshape(ctx.x_shape_og),
448
+ dg.reshape(ctx.g_shape_og),
449
+ dw,
450
+ db,
451
+ None,
452
+ dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
453
+ None,
454
+ None,
455
+ None,
456
+ None,
457
+ )
458
+
459
+
460
+ class LayerNormGatedLinearFunction(torch.autograd.Function):
461
+
462
+ @staticmethod
463
+ @input_guard
464
+ def forward(
465
+ ctx,
466
+ x: torch.Tensor,
467
+ g: torch.Tensor,
468
+ norm_weight: torch.Tensor,
469
+ norm_bias: torch.Tensor,
470
+ linear_weight: torch.Tensor,
471
+ linear_bias: torch.Tensor,
472
+ residual: Optional[torch.Tensor] = None,
473
+ eps: float = 1e-6,
474
+ prenorm: bool = False,
475
+ residual_in_fp32: bool = False,
476
+ is_rms_norm: bool = False,
477
+ ):
478
+ x_shape_og = x.shape
479
+ g_shape_og = g.shape
480
+ # reshape input data into 2D tensor
481
+ x = x.reshape(-1, x.shape[-1])
482
+ g = g.reshape(-1, g.shape[-1])
483
+ if residual is not None:
484
+ assert residual.shape == x_shape_og
485
+ residual = residual.reshape(-1, residual.shape[-1])
486
+ residual_dtype = (
487
+ residual.dtype
488
+ if residual is not None
489
+ else (torch.float if residual_in_fp32 else None)
490
+ )
491
+ y, mean, rstd, residual_out = layer_norm_gated_fwd(
492
+ x=x,
493
+ g=g,
494
+ weight=norm_weight,
495
+ bias=norm_bias,
496
+ eps=eps,
497
+ residual=residual,
498
+ residual_dtype=residual_dtype,
499
+ is_rms_norm=is_rms_norm
500
+ )
501
+ y = y.reshape(x_shape_og)
502
+ dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype
503
+ linear_weight = linear_weight.to(dtype)
504
+ linear_bias = linear_bias.to(dtype) if linear_bias is not None else None
505
+ out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias)
506
+ # We don't store y, will be recomputed in the backward pass to save memory
507
+ ctx.save_for_backward(residual_out, g, norm_weight, norm_bias, linear_weight, mean, rstd)
508
+ ctx.x_shape_og = x_shape_og
509
+ ctx.g_shape_og = g_shape_og
510
+ ctx.eps = eps
511
+ ctx.is_rms_norm = is_rms_norm
512
+ ctx.has_residual = residual is not None
513
+ ctx.prenorm = prenorm
514
+ ctx.x_dtype = x.dtype
515
+ ctx.linear_bias_is_none = linear_bias is None
516
+ return out if not prenorm else (out, residual_out.reshape(x_shape_og))
517
+
518
+ @staticmethod
519
+ @input_guard
520
+ def backward(ctx, dout, *args):
521
+ x, g, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors
522
+ dout = dout.reshape(-1, dout.shape[-1])
523
+ dy = F.linear(dout, linear_weight.t())
524
+ dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
525
+ assert dy.shape == x.shape
526
+ if ctx.prenorm:
527
+ dresidual = args[0]
528
+ dresidual = dresidual.reshape(-1, dresidual.shape[-1])
529
+ assert dresidual.shape == x.shape
530
+ else:
531
+ dresidual = None
532
+ dx, dg, dnorm_weight, dnorm_bias, dresidual_in, y = layer_norm_gated_bwd(
533
+ dy=dy,
534
+ x=x,
535
+ g=g,
536
+ weight=norm_weight,
537
+ bias=norm_bias,
538
+ eps=ctx.eps,
539
+ mean=mean,
540
+ rstd=rstd,
541
+ dresidual=dresidual,
542
+ has_residual=ctx.has_residual,
543
+ is_rms_norm=ctx.is_rms_norm,
544
+ x_dtype=ctx.x_dtype,
545
+ recompute_output=True,
546
+ )
547
+ dlinear_weight = torch.einsum("bo,bi->oi", dout, y)
548
+ return (
549
+ dx.reshape(ctx.x_shape_og),
550
+ dg.reshape(ctx.g_shape_og),
551
+ dnorm_weight,
552
+ dnorm_bias,
553
+ dlinear_weight,
554
+ dlinear_bias,
555
+ dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
556
+ None,
557
+ None,
558
+ None,
559
+ None,
560
+ )
561
+
562
+
563
+ def layer_norm_gated(
564
+ x: torch.Tensor,
565
+ g: torch.Tensor,
566
+ weight: torch.Tensor,
567
+ bias: torch.Tensor,
568
+ activation: str = 'swish',
569
+ residual: Optional[torch.Tensor] = None,
570
+ prenorm: bool = False,
571
+ residual_in_fp32: bool = False,
572
+ eps: float = 1e-6
573
+ ):
574
+ return LayerNormGatedFunction.apply(
575
+ x,
576
+ g,
577
+ weight,
578
+ bias,
579
+ activation,
580
+ residual,
581
+ eps,
582
+ prenorm,
583
+ residual_in_fp32,
584
+ False
585
+ )
586
+
587
+
588
+ def rms_norm_gated(
589
+ x: torch.Tensor,
590
+ g: torch.Tensor,
591
+ weight: torch.Tensor,
592
+ bias: torch.Tensor,
593
+ activation: str = 'swish',
594
+ residual: Optional[torch.Tensor] = None,
595
+ prenorm: bool = False,
596
+ residual_in_fp32: bool = False,
597
+ eps: float = 1e-6
598
+ ):
599
+ return LayerNormGatedFunction.apply(
600
+ x,
601
+ g,
602
+ weight,
603
+ bias,
604
+ activation,
605
+ residual,
606
+ eps,
607
+ prenorm,
608
+ residual_in_fp32,
609
+ True
610
+ )
611
+
612
+
613
+ def layer_norm_swish_gate_linear(
614
+ x: torch.Tensor,
615
+ g: torch.Tensor,
616
+ norm_weight: torch.Tensor,
617
+ norm_bias: torch.Tensor,
618
+ linear_weight: torch.Tensor,
619
+ linear_bias: torch.Tensor,
620
+ residual: Optional[torch.Tensor] = None,
621
+ prenorm: bool = False,
622
+ residual_in_fp32: bool = False,
623
+ eps: float = 1e-6
624
+ ):
625
+ return LayerNormGatedLinearFunction.apply(
626
+ x,
627
+ g,
628
+ norm_weight,
629
+ norm_bias,
630
+ linear_weight,
631
+ linear_bias,
632
+ residual,
633
+ eps,
634
+ prenorm,
635
+ residual_in_fp32,
636
+ False
637
+ )
638
+
639
+
640
+ def rms_norm_swish_gate_linear(
641
+ x,
642
+ g: torch.Tensor,
643
+ norm_weight: torch.Tensor,
644
+ norm_bias: torch.Tensor,
645
+ linear_weight: torch.Tensor,
646
+ linear_bias: torch.Tensor,
647
+ residual: Optional[torch.Tensor] = None,
648
+ prenorm: bool = False,
649
+ residual_in_fp32: bool = False,
650
+ eps: float = 1e-6
651
+ ):
652
+ return LayerNormGatedLinearFunction.apply(
653
+ x,
654
+ g,
655
+ norm_weight,
656
+ norm_bias,
657
+ linear_weight,
658
+ linear_bias,
659
+ residual,
660
+ eps,
661
+ prenorm,
662
+ residual_in_fp32,
663
+ True
664
+ )
665
+
666
+
667
+ class FusedLayerNormGated(nn.Module):
668
+
669
+ def __init__(
670
+ self,
671
+ hidden_size: int,
672
+ elementwise_affine: bool = True,
673
+ bias: bool = False,
674
+ activation: str = 'swish',
675
+ eps: float = 1e-5,
676
+ device: Optional[torch.device] = None,
677
+ dtype: Optional[torch.dtype] = None,
678
+ ) -> FusedLayerNormGated:
679
+ factory_kwargs = {"device": device, "dtype": dtype}
680
+ super().__init__()
681
+
682
+ self.hidden_size = hidden_size
683
+ self.elementwise_affine = elementwise_affine
684
+ self.eps = eps
685
+ self.activation = activation
686
+
687
+ if self.activation not in ['swish', 'silu', 'sigmoid']:
688
+ raise ValueError(f"Unsupported activation: {self.activation}")
689
+
690
+ self.register_parameter("weight", None)
691
+ self.register_parameter("bias", None)
692
+ if elementwise_affine:
693
+ self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
694
+ if bias:
695
+ self.bias = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
696
+
697
+ self.reset_parameters()
698
+
699
+ def reset_parameters(self):
700
+ if self.elementwise_affine:
701
+ nn.init.ones_(self.weight)
702
+ if self.bias is not None:
703
+ nn.init.zeros_(self.bias)
704
+
705
+ def __repr__(self) -> str:
706
+ s = f"{self.__class__.__name__}({self.hidden_size}"
707
+ if not self.elementwise_affine:
708
+ s += f", elementwise_affine={self.elementwise_affine}"
709
+ s += f", eps={self.eps}"
710
+ s += f", activation={self.activation}"
711
+ s += ")"
712
+ return s
713
+
714
+ def forward(
715
+ self,
716
+ x: torch.Tensor,
717
+ g: torch.Tensor,
718
+ residual: Optional[torch.Tensor] = None,
719
+ prenorm: bool = False,
720
+ residual_in_fp32: bool = False
721
+ ) -> torch.Tensor:
722
+ return layer_norm_gated(
723
+ x,
724
+ g,
725
+ self.weight,
726
+ self.bias,
727
+ self.activation,
728
+ residual=residual,
729
+ eps=self.eps,
730
+ prenorm=prenorm,
731
+ residual_in_fp32=residual_in_fp32
732
+ )
733
+
734
+
735
+ class FusedRMSNormGated(nn.Module):
736
+
737
+ def __init__(
738
+ self,
739
+ hidden_size: int,
740
+ elementwise_affine: bool = True,
741
+ eps: float = 1e-5,
742
+ activation: str = 'swish',
743
+ device: Optional[torch.device] = None,
744
+ dtype: Optional[torch.dtype] = None,
745
+ ) -> FusedRMSNormGated:
746
+ factory_kwargs = {"device": device, "dtype": dtype}
747
+ super().__init__()
748
+
749
+ self.hidden_size = hidden_size
750
+ self.elementwise_affine = elementwise_affine
751
+ self.eps = eps
752
+ self.activation = activation
753
+
754
+ if self.activation not in ['swish', 'silu', 'sigmoid']:
755
+ raise ValueError(f"Unsupported activation: {self.activation}")
756
+
757
+ if elementwise_affine:
758
+ self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
759
+ else:
760
+ self.register_parameter("weight", None)
761
+ self.register_parameter("bias", None)
762
+
763
+ self.reset_parameters()
764
+
765
+ def reset_parameters(self):
766
+ if self.elementwise_affine:
767
+ nn.init.ones_(self.weight)
768
+
769
+ def __repr__(self) -> str:
770
+ s = f"{self.__class__.__name__}({self.hidden_size}"
771
+ if not self.elementwise_affine:
772
+ s += f", elementwise_affine={self.elementwise_affine}"
773
+ s += f", eps={self.eps}"
774
+ s += f", activation={self.activation}"
775
+ s += ")"
776
+ return s
777
+
778
+ def forward(
779
+ self,
780
+ x: torch.Tensor,
781
+ g: torch.Tensor,
782
+ residual: Optional[torch.Tensor] = None,
783
+ prenorm: bool = False,
784
+ residual_in_fp32: bool = False
785
+ ) -> torch.Tensor:
786
+ return rms_norm_gated(
787
+ x,
788
+ g,
789
+ self.weight,
790
+ self.bias,
791
+ self.activation,
792
+ residual=residual,
793
+ eps=self.eps,
794
+ prenorm=prenorm,
795
+ residual_in_fp32=residual_in_fp32
796
+ )
797
+
798
+
799
+ class FusedLayerNormSwishGate(FusedLayerNormGated):
800
+
801
+ def __init__(
802
+ self,
803
+ hidden_size: int,
804
+ elementwise_affine: bool = True,
805
+ bias: bool = False,
806
+ eps: float = 1e-5,
807
+ device: Optional[torch.device] = None,
808
+ dtype: Optional[torch.dtype] = None,
809
+ ) -> FusedLayerNormSwishGate:
810
+ super().__init__(
811
+ hidden_size=hidden_size,
812
+ elementwise_affine=elementwise_affine,
813
+ bias=bias,
814
+ eps=eps,
815
+ device=device,
816
+ dtype=dtype
817
+ )
818
+
819
+
820
+ class FusedRMSNormSwishGate(FusedRMSNormGated):
821
+
822
+ def __init__(
823
+ self,
824
+ hidden_size: int,
825
+ elementwise_affine: bool = True,
826
+ eps: float = 1e-5,
827
+ device: Optional[torch.device] = None,
828
+ dtype: Optional[torch.dtype] = None,
829
+ ) -> FusedRMSNormSwishGate:
830
+ super().__init__(
831
+ hidden_size=hidden_size,
832
+ elementwise_affine=elementwise_affine,
833
+ eps=eps,
834
+ device=device,
835
+ dtype=dtype
836
+ )
837
+
838
+
839
+ class FusedLayerNormGatedLinear(nn.Module):
840
+
841
+ def __init__(
842
+ self,
843
+ hidden_size: int,
844
+ elementwise_affine: bool = True,
845
+ eps: float = 1e-5,
846
+ device: Optional[torch.device] = None,
847
+ dtype: Optional[torch.dtype] = None,
848
+ ) -> FusedLayerNormGatedLinear:
849
+ factory_kwargs = {"device": device, "dtype": dtype}
850
+ super().__init__()
851
+
852
+ self.hidden_size = hidden_size
853
+ self.elementwise_affine = elementwise_affine
854
+ self.eps = eps
855
+
856
+ if elementwise_affine:
857
+ self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
858
+ else:
859
+ self.register_parameter("weight", None)
860
+ self.register_parameter("bias", None)
861
+
862
+ self.reset_parameters()
863
+
864
+ def reset_parameters(self):
865
+ if self.elementwise_affine:
866
+ nn.init.ones_(self.weight)
867
+
868
+ def __repr__(self) -> str:
869
+ s = f"{self.__class__.__name__}({self.hidden_size}"
870
+ if not self.elementwise_affine:
871
+ s += f", elementwise_affine={self.elementwise_affine}"
872
+ s += f", eps={self.eps}"
873
+ s += ")"
874
+ return s
875
+
876
+ def forward(
877
+ self,
878
+ x: torch.Tensor,
879
+ g: torch.Tensor,
880
+ weight: Optional[torch.Tensor] = None,
881
+ bias: Optional[torch.Tensor] = None,
882
+ residual: Optional[torch.Tensor] = None,
883
+ prenorm: bool = False,
884
+ residual_in_fp32: bool = False
885
+ ) -> torch.Tensor:
886
+ return layer_norm_swish_gate_linear(
887
+ x,
888
+ g,
889
+ self.weight,
890
+ self.bias,
891
+ weight,
892
+ bias,
893
+ residual=residual,
894
+ eps=self.eps,
895
+ prenorm=prenorm,
896
+ residual_in_fp32=residual_in_fp32
897
+ )
898
+
899
+
900
+ class FusedLayerNormSwishGateLinear(FusedLayerNormGatedLinear):
901
+
902
+ def __init__(
903
+ self,
904
+ hidden_size: int,
905
+ elementwise_affine: bool = True,
906
+ eps: float = 1e-5,
907
+ device: Optional[torch.device] = None,
908
+ dtype: Optional[torch.dtype] = None,
909
+ ) -> FusedLayerNormSwishGateLinear:
910
+ super().__init__(
911
+ hidden_size=hidden_size,
912
+ elementwise_affine=elementwise_affine,
913
+ eps=eps,
914
+ device=device,
915
+ dtype=dtype
916
+ )
917
+
918
+
919
+ class FusedRMSNormGatedLinear(nn.Module):
920
+
921
+ def __init__(
922
+ self,
923
+ hidden_size,
924
+ elementwise_affine: bool = True,
925
+ eps: float = 1e-5,
926
+ device: Optional[torch.device] = None,
927
+ dtype: Optional[torch.dtype] = None,
928
+ ) -> FusedRMSNormGatedLinear:
929
+ factory_kwargs = {"device": device, "dtype": dtype}
930
+ super().__init__()
931
+
932
+ self.hidden_size = hidden_size
933
+ self.elementwise_affine = elementwise_affine
934
+ self.eps = eps
935
+
936
+ self.register_parameter("weight", None)
937
+ self.register_parameter("bias", None)
938
+ if elementwise_affine:
939
+ self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
940
+
941
+ self.reset_parameters()
942
+
943
+ def reset_parameters(self):
944
+ if self.elementwise_affine:
945
+ nn.init.ones_(self.weight)
946
+
947
+ def __repr__(self) -> str:
948
+ s = f"{self.__class__.__name__}({self.hidden_size}"
949
+ if not self.elementwise_affine:
950
+ s += f", elementwise_affine={self.elementwise_affine}"
951
+ s += f", eps={self.eps}"
952
+ s += ")"
953
+ return s
954
+
955
+ def forward(
956
+ self,
957
+ x: torch.Tensor,
958
+ g: torch.Tensor,
959
+ weight: Optional[torch.Tensor] = None,
960
+ bias: Optional[torch.Tensor] = None,
961
+ residual: Optional[torch.Tensor] = None,
962
+ prenorm: bool = False,
963
+ residual_in_fp32: bool = False
964
+ ) -> torch.Tensor:
965
+ return rms_norm_swish_gate_linear(
966
+ x,
967
+ g,
968
+ self.weight,
969
+ self.bias,
970
+ weight,
971
+ bias,
972
+ residual=residual,
973
+ eps=self.eps,
974
+ prenorm=prenorm,
975
+ residual_in_fp32=residual_in_fp32
976
+ )
977
+
978
+
979
+ class FusedRMSNormSwishGateLinear(FusedRMSNormGatedLinear):
980
+
981
+ def __init__(
982
+ self,
983
+ hidden_size: int,
984
+ elementwise_affine: bool = True,
985
+ eps: float = 1e-5,
986
+ device: Optional[torch.device] = None,
987
+ dtype: Optional[torch.dtype] = None,
988
+ ) -> FusedRMSNormSwishGateLinear:
989
+ super().__init__(
990
+ hidden_size=hidden_size,
991
+ elementwise_affine=elementwise_affine,
992
+ eps=eps,
993
+ device=device,
994
+ dtype=dtype
995
+ )
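
The modules above fuse a (RMS)LayerNorm over x with a swish/sigmoid gate computed from g, optionally followed by a linear projection. Below is a minimal usage sketch of the functional rms_norm_gated path; it is not part of the diff, and the import path, device, dtype, and shapes are assumptions for illustration only.

import torch
from fla.modules.layernorm_gated import rms_norm_gated  # assumed import path

# y = RMSNorm(x) * swish(g), normalized over the last dimension
B, T, D = 2, 128, 512
x = torch.randn(B, T, D, device='cuda', dtype=torch.bfloat16, requires_grad=True)
g = torch.randn_like(x, requires_grad=True)
weight = torch.ones(D, device='cuda', dtype=torch.bfloat16, requires_grad=True)

y = rms_norm_gated(x, g, weight, bias=None, activation='swish')
y.float().sum().backward()  # gradients flow back to x, g and weight
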
fla/modules/grpo.py ADDED
@@ -0,0 +1,396 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py
4
+ """
5
+ # Get the per-token log probabilities for the completions for the model and the reference model
6
+ def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep):
7
+ # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded
8
+ logits = model(input_ids=input_ids, attention_mask=attention_mask, logits_to_keep=logits_to_keep + 1).logits
9
+ logits = logits[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred
10
+
11
+ input_ids = input_ids[:, -logits_to_keep:]
12
+ # For transformers<=4.48, logits_to_keep argument isn't supported, so here we drop logits ourselves.
13
+ # See https://github.com/huggingface/trl/issues/2770
14
+ logits = logits[:, -logits_to_keep:]
15
+ return selective_log_softmax(logits, input_ids) # compute logprobs for the input tokens
16
+
17
+ def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
18
+ if return_outputs:
19
+ raise ValueError("The GRPOTrainer does not support returning outputs")
20
+ # Compute the per-token log probabilities for the model
21
+
22
+ prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
23
+ completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"]
24
+ input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
25
+ attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
26
+ logits_to_keep = completion_ids.size(1) # we only need to compute the logits for the completion tokens
27
+
28
+ per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep)
29
+
30
+ # Compute the KL divergence between the model and the reference model
31
+ ref_per_token_logps = inputs["ref_per_token_logps"]
32
+ per_token_kl = torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1
33
+
34
+ # x - x.detach() allows for preserving gradients from x
35
+ advantages = inputs["advantages"]
36
+ per_token_loss = torch.exp(per_token_logps - per_token_logps.detach()) * advantages.unsqueeze(1)
37
+ per_token_loss = -(per_token_loss - self.beta * per_token_kl)
38
+ loss = ((per_token_loss * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean()
39
+
40
+ # Log the metrics
41
+ completion_length = self.accelerator.gather_for_metrics(completion_mask.sum(1)).float().mean().item()
42
+ self._metrics["completion_length"].append(completion_length)
43
+
44
+ mean_kl = ((per_token_kl * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean()
45
+ self._metrics["kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item())
46
+
47
+ return loss
48
+ """
49
+
50
+
51
+ import torch
52
+ import triton
53
+ import triton.language as tl
54
+
55
+ from fla.ops.utils.op import exp, log
56
+ from fla.utils import input_guard
57
+
58
+
59
+ @triton.autotune(
60
+ [triton.Config({'BLOCK_SIZE': BLOCK_SIZE}, num_warps=NUM_WARPS, num_stages=NUM_STAGES)
61
+ for BLOCK_SIZE in [1024, 2048, 4096, 8192]
62
+ for NUM_WARPS in [8, 16, 32]
63
+ for NUM_STAGES in [1, 2, 4]
64
+ ], key=['B', 'N']
65
+ )
66
+ @triton.jit
67
+ def grpo_fwd_kernel(
68
+ logits_ptr,
69
+ ref_logp_ptr,
70
+ input_ids_ptr,
71
+ advantages_ptr,
72
+ completion_mask_ptr,
73
+ loss_ptr,
74
+ lse_ptr,
75
+ beta,
76
+ save_kl: tl.constexpr,
77
+ B,
78
+ M,
79
+ N,
80
+ L,
81
+ start_idx,
82
+ BLOCK_SIZE: tl.constexpr
83
+ ):
84
+ row_idx = tl.program_id(0)
85
+
86
+ off_b = row_idx // L
87
+ N = tl.cast(N, tl.int64)
88
+
89
+ loss_ptr += row_idx
90
+
91
+ completion_mask_ptr += row_idx
92
+ not_skip = tl.load(completion_mask_ptr).to(tl.int1)
93
+ if not_skip == 1:
94
+ ref_logp_ptr += row_idx
95
+ lse_ptr += row_idx
96
+ advantages_ptr += off_b
97
+ logits_ptr += N * (row_idx + off_b)
98
+ input_ids_ptr += row_idx + (off_b+1) * start_idx
99
+ base_cols = tl.arange(0, BLOCK_SIZE)
100
+
101
+ m_i = -float("inf")
102
+ l_i = 0.0
103
+ for start_n in tl.range(0, N, BLOCK_SIZE):
104
+ cols = start_n + base_cols
105
+ mask = cols < N
106
+ logits = tl.load(logits_ptr+cols, mask=mask, other=-float('inf')).to(tl.float32)
107
+ m_ij = tl.max(logits)
108
+ new_m_i = tl.maximum(m_i, m_ij)
109
+ l_i = l_i * exp(m_i - new_m_i) + tl.sum(exp(logits - new_m_i))
110
+ m_i = new_m_i
111
+ lse = log(l_i) + m_i
112
+
113
+ idx = tl.load(input_ids_ptr)
114
+ x = tl.load(logits_ptr+idx).to(tl.float32)
115
+ advantage = tl.load(advantages_ptr).to(tl.float32)
116
+ ref_logp = tl.load(ref_logp_ptr)
117
+ logp = x - lse
118
+ diff = ref_logp - logp
119
+ kl = exp(diff) - diff - 1
120
+ loss = kl * beta - advantage
121
+
122
+ tl.store(loss_ptr, loss.to(loss_ptr.dtype.element_ty))
123
+ tl.store(lse_ptr, lse.to(lse_ptr.dtype.element_ty))
124
+ if save_kl:
125
+ tl.store(loss_ptr+M, kl.to(loss_ptr.dtype.element_ty))
126
+ else:
127
+ # store 0
128
+ tl.store(loss_ptr, 0.0)
129
+ if save_kl:
130
+ tl.store(loss_ptr+M, 0.0)
131
+
132
+
133
+ @triton.autotune(
134
+ [triton.Config({'BLOCK_SIZE': BLOCK_SIZE}, num_warps=NUM_WARPS, num_stages=NUM_STAGES)
135
+ for BLOCK_SIZE in [1024, 2048, 4096, 8192]
136
+ for NUM_WARPS in [8, 16, 32]
137
+ for NUM_STAGES in [1, 2, 4]
138
+ ], key=['B', 'N']
139
+ )
140
+ @triton.jit
141
+ def grpo_bwd_kernel(
142
+ dloss_ptr,
143
+ dlogits_ptr,
144
+ logits_ptr,
145
+ ref_logp_ptr,
146
+ input_ids_ptr,
147
+ advantages_ptr,
148
+ completion_mask_ptr,
149
+ lse_ptr,
150
+ beta,
151
+ B,
152
+ N,
153
+ L,
154
+ start_idx,
155
+ BLOCK_SIZE: tl.constexpr
156
+ ):
157
+
158
+ row_idx = tl.program_id(0) # B*L
159
+ off_b = row_idx // L
160
+
161
+ N = tl.cast(N, tl.int64)
162
+
163
+ dlogits_ptr += N * (row_idx + off_b)
164
+ base_cols = tl.arange(0, BLOCK_SIZE)
165
+ completion_mask_ptr += row_idx
166
+ not_skip = tl.load(completion_mask_ptr).to(tl.int1)
167
+
168
+ if not_skip == 1:
169
+ lse_ptr += row_idx
170
+ dloss_ptr += row_idx
171
+ advantages_ptr += off_b
172
+ ref_logp_ptr += row_idx
173
+ logits_ptr += N * (row_idx + off_b)
174
+ input_ids_ptr += row_idx + (off_b+1) * start_idx
175
+ dloss = tl.load(dloss_ptr).to(tl.float32)
176
+ lse = tl.load(lse_ptr).to(tl.float32)
177
+ idx = tl.load(input_ids_ptr)
178
+ x = tl.load(logits_ptr+idx).to(tl.float32)
179
+ advantage = tl.load(advantages_ptr).to(tl.float32)
180
+ ref_logp = tl.load(ref_logp_ptr)
181
+ logp = x - lse
182
+
183
+ dlogp = (beta * (-1.0 * exp(ref_logp - logp) + 1)
184
+ - advantage) * dloss
185
+
186
+ for start_n in tl.range(0, N, BLOCK_SIZE):
187
+ cols = start_n + base_cols
188
+ mask = cols < N
189
+ logits = tl.load(logits_ptr+cols, mask=mask, other=-float('inf')).to(tl.float32)
190
+ probs = exp(logits - lse)
191
+ dlogits = tl.where(cols == idx, 1-probs, -probs) * dlogp
192
+
193
+ tl.store(dlogits_ptr+cols, dlogits.to(dlogits_ptr.dtype.element_ty), mask=mask)
194
+ else:
195
+ dlogits = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
196
+ for start_n in tl.range(0, N, BLOCK_SIZE):
197
+ cols = start_n + base_cols
198
+ mask = cols < N
199
+
200
+ tl.store(dlogits_ptr+cols, dlogits.to(dlogits_ptr.dtype.element_ty), mask=mask)
201
+
202
+
203
+ class GrpoLoss(torch.autograd.Function):
204
+
205
+ @input_guard
206
+ @staticmethod
207
+ def forward(ctx, logits, ref_logp, input_ids, advantages, beta, completion_mask, save_kl):
208
+ ctx.input_shape = logits.shape
209
+ B, L_ADD_1, N = ctx.input_shape
210
+ L = L_ADD_1 - 1
211
+ M = B * L
212
+ input_ids_start_index = input_ids.size(1) - L
213
+
214
+ if not save_kl:
215
+ loss = torch.empty(B, L, device=logits.device, dtype=torch.float32)
216
+ else:
217
+ loss = torch.empty(B*2, L, device=logits.device, dtype=torch.float32)
218
+
219
+ lse = torch.empty(B, L, device=logits.device, dtype=torch.float32)
220
+
221
+ if completion_mask is None:
222
+ completion_mask = torch.ones(B, L, device=logits.device, dtype=torch.int32)
223
+ else:
224
+ loss[:B].masked_fill_(completion_mask.logical_not(), 0.0)
225
+
226
+ grpo_fwd_kernel[(M,)](
227
+ logits_ptr=logits,
228
+ ref_logp_ptr=ref_logp,
229
+ input_ids_ptr=input_ids,
230
+ advantages_ptr=advantages,
231
+ completion_mask_ptr=completion_mask,
232
+ loss_ptr=loss,
233
+ lse_ptr=lse,
234
+ beta=beta,
235
+ save_kl=save_kl,
236
+ B=B, M=M, N=N, L=L,
237
+ start_idx=input_ids_start_index,
238
+ )
239
+ ctx.beta = beta
240
+ ctx.save_for_backward(lse, logits, input_ids, advantages, completion_mask)
241
+ ctx.ref_logp = ref_logp
242
+ return loss
243
+
244
+ @input_guard
245
+ @staticmethod
246
+ def backward(ctx, dloss):
247
+ # The grad of logits comes from two parts, the reward part and the kl part
248
+ lse, logits, input_ids, advantages, completion_mask = ctx.saved_tensors
249
+ B, L_ADD_1, N = ctx.input_shape
250
+ L = L_ADD_1 - 1
251
+ M = B * L
252
+
253
+ input_ids_start_index = input_ids.size(1) - L
254
+
255
+ dlogits = torch.empty_like(logits) # B, L_ADD_1, N
256
+
257
+ grpo_bwd_kernel[(M,)](
258
+ dloss_ptr=dloss,
259
+ dlogits_ptr=dlogits,
260
+ logits_ptr=logits,
261
+ ref_logp_ptr=ctx.ref_logp,
262
+ input_ids_ptr=input_ids,
263
+ advantages_ptr=advantages,
264
+ completion_mask_ptr=completion_mask,
265
+ lse_ptr=lse,
266
+ beta=ctx.beta,
267
+ B=B, N=N, L=L,
268
+ start_idx=input_ids_start_index,
269
+ )
270
+ # The last token in the completion is not used in the loss computation
271
+ # and therefore its gradient should be set to 0
272
+ dlogits[:, -1, :].fill_(0.0)
273
+ return dlogits.view(*ctx.input_shape), None, None, None, None, None, None
274
+
275
+
276
+ def fused_grpo_loss(logits, ref_logp, input_ids, advantages, beta=0.1, completion_mask=None, save_kl=False) -> torch.Tensor:
277
+ '''
278
+ Compute the GRPO loss, saving memory (no additional allocations) with faster speed (about 6x on A800).
279
+
280
+ Args:
281
+ logits: Tensor, [B, L+1, vocab_size], the original output of the model; note it is not logits[:, :-1]
282
+ ref_logp: Tensor, [B, L], the per-token log probabilities from the reference model; note it is not ref_logits[:, :-1]
283
+ input_ids: Tensor, [B, K+L], the prompt_completion_ids, containing both the prompt ids and the output ids
284
+ advantages: Tensor, [B], the advantages of each prompt
285
+ beta: float, the weight of kl loss
286
+ completion_mask: Tensor, loss mask
287
+ save_kl: bool, if True, the per-token KL term is returned as well
288
+
289
+ Returns:
290
+ loss: Tensor, [B, L], the GRPO loss, containing both the advantage part and the KL part
291
+
292
+ NOTE: logits (and ref_logits) are computed by the following steps
293
+ logits_to_keep = completion_ids.size(1)
294
+
295
+ def get_per_token_logits(model, input_ids, attention_mask, logits_to_keep):
296
+ # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded
297
+ logits = model(
298
+ input_ids=input_ids, attention_mask=attention_mask, logits_to_keep=logits_to_keep + 1
299
+ ).logits
300
+ return logits
301
+
302
+ logits = get_per_token_logits(model, prompt_completion_ids, attention_mask, logits_to_keep)
303
+ '''
304
+ out = GrpoLoss.apply(logits, ref_logp, input_ids, advantages, beta, completion_mask, save_kl)
305
+ if not save_kl:
306
+ return out
307
+ else:
308
+ return out.chunk(2, axis=0)
309
+
310
+
311
+ def grpo_loss_torch(logits, ref_logp, input_ids, advantages, beta=0.1, completion_mask=None, save_kl=False):
312
+ def get_log_probs(logits, input_ids):
313
+ per_token_logps = []
314
+ for logits_row, input_ids_row in zip(logits, input_ids[:, -logits.size(1):]):
315
+ log_probs = logits_row.log_softmax(dim=-1)
316
+ token_log_prob = torch.gather(log_probs, dim=1, index=input_ids_row.unsqueeze(1)).squeeze(1)
317
+ per_token_logps.append(token_log_prob)
318
+ return torch.stack(per_token_logps)
319
+
320
+ logits = logits[:, :-1]
321
+ per_token_logps = get_log_probs(logits, input_ids)
322
+ ref_per_token_logps = ref_logp
323
+ per_token_kl = torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1
324
+
325
+ per_token_loss = torch.exp(per_token_logps - per_token_logps.detach()) * advantages.unsqueeze(1)
326
+ per_token_loss = -(per_token_loss - beta * per_token_kl)
327
+ if completion_mask is not None:
328
+ per_token_loss *= completion_mask
329
+ if save_kl:
330
+ per_token_kl *= completion_mask
331
+ return per_token_loss if not save_kl else (per_token_loss, per_token_kl)
332
+
333
+
334
+ @torch.compile(fullgraph=True)
335
+ def grpo_loss_with_old_logps(
336
+ logps: torch.Tensor,
337
+ ref_logps: torch.Tensor,
338
+ old_logps: torch.Tensor,
339
+ pad_mask: torch.Tensor,
340
+ logits_to_keep: int,
341
+ rewards: torch.Tensor,
342
+ beta: float = 0.2,
343
+ epsilon: float = 0.2
344
+ ):
345
+ """
346
+ Compute the GRPO (Group Relative Policy Optimization) loss.
347
+
348
+ Args:
349
+ logps (torch.Tensor): [Batch, Token_length] Log probabilities of the current policy.
350
+ ref_logps (torch.Tensor):[Batch, Token_length] Log probabilities of the reference policy.
351
+ old_logps (torch.Tensor): [Batch, Token_length] Log probabilities of the old policy.
352
+ pad_mask (torch.Tensor): [Batch, Token_length] Boolean mask of valid (non-padding) completion tokens,
354
+ derived from the completion token IDs and the pad token ID.
354
+ logits_to_keep (int): Number of logits to keep for masking.
355
+ rewards (torch.Tensor): [Batch] Rewards for each generation.
356
+ beta (float) = 0.2: A hyperparameter for weighting the KL divergence term.
357
+ epsilon (float) = 0.2: A float hyperparameter for clipping the importance weights.
358
+
359
+ Returns:
360
+ torch.Tensor: The computed GRPO loss.
361
+ """
362
+ B = logps.shape[0]
363
+ assert B > 1, "Batch * Num generations should be greater than 1"
364
+
365
+ rewards_shaped = rewards.view(-1, B) # B,num_generations
366
+ advantages = (rewards_shaped - rewards_shaped.mean(dim=1, keepdim=True)) / \
367
+ (rewards_shaped.std(dim=1, keepdim=True) + 1e-8)
368
+ advantages = advantages.view(-1) # B*num_generations
369
+ # Calculate the per-token KL divergence
370
+ per_token_kl = torch.exp(ref_logps - logps) - (ref_logps - logps) - 1
371
+
372
+ # Calculate the ratio of probabilities (importance weights)
373
+ # Importance weights are calculated as exp(log_pi_theta - log_pi_theta_old)
374
+ importance_weights = torch.exp(logps - old_logps)
375
+
376
+ # Clip the importance weights to the range [1 - epsilon, 1 + epsilon]
377
+ importance_weights_clipped = torch.clamp(importance_weights, 1 - epsilon, 1 + epsilon)
378
+
379
+ # Create a completion mask. It checks which positions are valid based on logits_to_keep
380
+ completion_mask = torch.arange(logits_to_keep, device=logps.device)[None, :] >= 0
381
+
382
+ # Combine the completion mask and padding mask
383
+ completion_mask = completion_mask & pad_mask # Ensure matching shape
384
+
385
+ # Add an extra dimension to advantages to match the shape for element-wise multiplication
386
+ advantages = advantages.unsqueeze(1)
387
+
388
+ # Calculate the per-token loss. It takes the minimum of the unclipped and clipped importance weights
389
+ # and subtracts the KL divergence term weighted by beta, then multiplies by the completion mask
390
+ token_loss = -(torch.min(advantages * importance_weights, advantages *
391
+ importance_weights_clipped) - beta * per_token_kl) * completion_mask
392
+
393
+ # Calculate the final loss by summing the token losses and normalizing by the number of valid tokens
394
+ loss = -token_loss.sum() / completion_mask.sum()
395
+
396
+ return loss
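
A minimal shape sketch for the fused GRPO loss above; it is not part of the diff, and the import path and all sizes are made up for illustration, following the docstring convention that logits carries one extra position beyond the completion.

import torch
from fla.modules.grpo import fused_grpo_loss, grpo_loss_torch  # assumed import path

B, K, L, V = 4, 16, 32, 1000  # batch, prompt length, completion length, vocab size
logits = torch.randn(B, L + 1, V, device='cuda', requires_grad=True)
ref_logp = torch.randn(B, L, device='cuda')                  # reference per-token log-probs
input_ids = torch.randint(0, V, (B, K + L), device='cuda')   # prompt + completion ids
advantages = torch.randn(B, device='cuda')
mask = torch.ones(B, L, device='cuda', dtype=torch.int32)

loss = fused_grpo_loss(logits, ref_logp, input_ids, advantages, beta=0.1, completion_mask=mask)
loss.mean().backward()  # loss is [B, L] per-token; gradient flows to logits only

# eager reference defined above, for a value comparison
ref = grpo_loss_torch(logits.detach(), ref_logp, input_ids, advantages, beta=0.1, completion_mask=mask)
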
fla/modules/l2norm.py ADDED
@@ -0,0 +1,176 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
3
+
4
+ from typing import Optional
5
+
6
+ import torch
7
+ import triton
8
+ import triton.language as tl
9
+
10
+ from fla.utils import input_guard
11
+
12
+
13
+ @triton.autotune(
14
+ configs=[
15
+ triton.Config({}, num_warps=num_warps)
16
+ for num_warps in [1, 2, 4, 8, 16, 32]
17
+ ],
18
+ key=['N']
19
+ )
20
+ @triton.jit
21
+ def l2norm_fwd_kernel(
22
+ X,
23
+ Y,
24
+ N,
25
+ eps,
26
+ BLOCK_N: tl.constexpr,
27
+ ):
28
+ i_m = tl.program_id(0)
29
+ X += i_m * N
30
+ Y += i_m * N
31
+ # Compute mean and variance
32
+ cols = tl.arange(0, BLOCK_N)
33
+ mask = cols < N
34
+ x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32)
35
+ xbar = tl.where(mask, x, 0.0)
36
+ var = tl.sum(xbar * xbar, axis=0)
37
+ rstd = 1 / tl.sqrt(var + eps)
38
+ # tl.store(Rstd + i_m, rstd)
39
+ # Normalize and apply linear transformation
40
+ y = x * rstd
41
+ # Write output
42
+ tl.store(Y + cols, y, mask=mask)
43
+
44
+
45
+ @triton.autotune(
46
+ configs=[
47
+ triton.Config({}, num_warps=num_warps)
48
+ for num_warps in [1, 2, 4, 8, 16, 32]
49
+ ],
50
+ key=['N']
51
+ )
52
+ @triton.jit
53
+ def l2norm_bwd_kernel(
54
+ X,
55
+ DY,
56
+ DX,
57
+ N,
58
+ eps,
59
+ BLOCK_N: tl.constexpr,
60
+ ):
61
+ i_m = tl.program_id(0)
62
+ X += i_m * N
63
+ DX += i_m * N
64
+ DY += i_m * N
65
+
66
+ # Y += i_m * stride_y_row
67
+ cols = tl.arange(0, BLOCK_N)
68
+ mask = cols < N
69
+ x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32)
70
+ x = tl.where(mask, x, 0.0)
71
+ var = tl.sum(x * x)
72
+ rstd = 1 / tl.sqrt(var + eps)
73
+ # tl.store(Rstd + i_m, rstd)
74
+ # Normalize and apply linear transformation
75
+ # y = x * rstd
76
+ dy = tl.load(DY + cols, mask=mask, other=0.0).to(tl.float32)
77
+ dy = tl.where(mask, dy, 0.0)
78
+ dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x
79
+ tl.store(DX + cols, dx, mask=mask)
80
+
81
+
82
+ def l2norm_fwd(
83
+ x: torch.Tensor,
84
+ eps: float = 1e-6,
85
+ output_dtype: Optional[torch.dtype] = None
86
+ ):
87
+ x_shape_og = x.shape
88
+ x = x.reshape(-1, x.shape[-1])
89
+ # allocate output
90
+ if output_dtype is None:
91
+ y = torch.empty_like(x)
92
+ else:
93
+ y = torch.empty_like(x, dtype=output_dtype)
94
+ assert y.stride(-1) == 1
95
+ N = x.shape[-1]
96
+ M = x.shape[0]
97
+ # rstd = torch.empty((M,), dtype=torch.float32, device=x.device)
98
+ # Less than 64KB per feature: enqueue fused kernel
99
+ MAX_FUSED_SIZE = 65536 // x.element_size()
100
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
101
+ if N > BLOCK_N:
102
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
103
+ # heuristics for number of warps
104
+ l2norm_fwd_kernel[(M,)](
105
+ x,
106
+ y,
107
+ N,
108
+ eps,
109
+ BLOCK_N,
110
+ )
111
+ return y.reshape(x_shape_og)
112
+
113
+
114
+ def l2norm_bwd(
115
+ x: torch.Tensor,
116
+ dy: torch.Tensor,
117
+ eps: float = 1e-5
118
+ ):
119
+ x_shape_og = x.shape
120
+ x = x.reshape(-1, dy.shape[-1])
121
+ dy = dy.reshape(-1, dy.shape[-1])
122
+ if dy.stride(-1) != 1:
123
+ dy = dy.contiguous()
124
+ assert dy.shape == x.shape
125
+ # allocate output
126
+ dx = torch.empty_like(x)
127
+ M = x.shape[0]
128
+ N = x.shape[-1]
129
+ # rstd = torch.empty((M,), dtype=torch.float32, device=x.device)
130
+ # Less than 64KB per feature: enqueue fused kernel
131
+ MAX_FUSED_SIZE = 65536 // x.element_size()
132
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
133
+ if N > BLOCK_N:
134
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
135
+ # heuristics for number of warps
136
+ l2norm_bwd_kernel[(M,)](
137
+ x,
138
+ dy,
139
+ dx,
140
+ N,
141
+ eps,
142
+ BLOCK_N,
143
+ )
144
+ return dx.reshape(x_shape_og)
145
+
146
+
147
+ class L2NormFunction(torch.autograd.Function):
148
+
149
+ @staticmethod
150
+ @input_guard
151
+ def forward(
152
+ ctx,
153
+ x,
154
+ eps=1e-6,
155
+ output_dtype=None
156
+ ):
157
+ y = l2norm_fwd(x, eps, output_dtype)
158
+ ctx.eps = eps
159
+ ctx.x_dtype = x.dtype
160
+ ctx.save_for_backward(x)
161
+ return y
162
+
163
+ @staticmethod
164
+ @input_guard
165
+ def backward(ctx, dy):
166
+ x, = ctx.saved_tensors
167
+ dx = l2norm_bwd(x, dy, ctx.eps)
168
+ return dx, None, None
169
+
170
+
171
+ def l2_norm(
172
+ x: torch.Tensor,
173
+ eps: float = 1e-6,
174
+ output_dtype: Optional[torch.dtype] = None
175
+ ) -> torch.Tensor:
176
+ return L2NormFunction.apply(x, eps, output_dtype)
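
The kernels above normalize along the last dimension as x / sqrt(sum(x^2) + eps). A small sanity sketch follows; it is not part of the diff, and the import path is an assumption.

import torch
from fla.modules.l2norm import l2_norm  # assumed import path

x = torch.randn(4, 64, 256, device='cuda', requires_grad=True)
y = l2_norm(x, eps=1e-6)

# plain PyTorch reference of the same formula
ref = x / (x.pow(2).sum(dim=-1, keepdim=True) + 1e-6).sqrt()
torch.testing.assert_close(y, ref, rtol=1e-4, atol=1e-4)

y.sum().backward()  # backward kernel computes dy * rstd - sum(dy * x) / (var + eps) * rstd * x
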
fla/modules/layernorm.py ADDED
@@ -0,0 +1,1196 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright (c) 2023, Tri Dao.
4
+ # https://github.com/state-spaces/mamba/blob/fb7b5310fa865dbd62aa059b1e26f2b431363e2a/mamba_ssm/ops/triton/layernorm.py
5
+ # Implement residual + layer_norm / rms_norm.
6
+
7
+ # Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
8
+ # For the backward pass, we keep weight_grad and bias_grad in registers and accumulate.
9
+ # This is faster for dimensions up to 8k, but after that it's much slower due to register spilling.
10
+ # The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine.
11
+
12
+ from __future__ import annotations
13
+
14
+ from functools import partial
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+ import torch.nn.functional as F
19
+ import triton
20
+ import triton.language as tl
21
+ from einops import rearrange
22
+ from torch.distributed import DeviceMesh
23
+ from torch.distributed.tensor import DTensor, Replicate, Shard, distribute_module
24
+ from torch.distributed.tensor.parallel import ParallelStyle
25
+
26
+ from fla.utils import get_multiprocessor_count, input_guard
27
+
28
+
29
+ def layer_norm_ref(
30
+ x: torch.Tensor,
31
+ weight: torch.Tensor,
32
+ bias: torch.Tensor,
33
+ residual: torch.Tensor = None,
34
+ eps: float = 1e-5,
35
+ prenorm: bool = False,
36
+ upcast: bool = False
37
+ ):
38
+ dtype = x.dtype
39
+ if upcast:
40
+ weight = weight.float()
41
+ bias = bias.float() if bias is not None else None
42
+ if upcast:
43
+ x = x.float()
44
+ residual = residual.float() if residual is not None else residual
45
+ if residual is not None:
46
+ x = (x + residual).to(x.dtype)
47
+ out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to(
48
+ dtype
49
+ )
50
+ return out if not prenorm else (out, x)
51
+
52
+
53
+ def rms_norm_ref(
54
+ x: torch.Tensor,
55
+ weight: torch.Tensor,
56
+ bias: torch.Tensor,
57
+ residual: torch.Tensor = None,
58
+ eps: float = 1e-5,
59
+ prenorm: bool = False,
60
+ upcast: bool = False
61
+ ):
62
+ dtype = x.dtype
63
+ if upcast:
64
+ weight = weight.float()
65
+ bias = bias.float() if bias is not None else None
66
+ if upcast:
67
+ x = x.float()
68
+ residual = residual.float() if residual is not None else residual
69
+ if residual is not None:
70
+ x = (x + residual).to(x.dtype)
71
+ rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps)
72
+ out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight)
73
+ out = out.to(dtype)
74
+ return out if not prenorm else (out, x)
75
+
76
+
77
+ def group_norm_ref(
78
+ x: torch.Tensor,
79
+ weight: torch.Tensor,
80
+ bias: torch.Tensor,
81
+ num_groups: int,
82
+ residual: torch.Tensor = None,
83
+ eps: float = 1e-5,
84
+ is_rms_norm: bool = False,
85
+ prenorm: bool = False,
86
+ upcast: bool = False
87
+ ):
88
+ dtype = x.dtype
89
+ if upcast:
90
+ weight = weight.float()
91
+ bias = bias.float() if bias is not None else None
92
+ if upcast:
93
+ x = x.float()
94
+ residual = residual.float() if residual is not None else residual
95
+ if residual is not None:
96
+ x = (x + residual).to(x.dtype)
97
+ residual = x
98
+ x, weight = [
99
+ rearrange(data, "... (g d) -> ... g d", g=num_groups) for data in (x, weight)
100
+ ]
101
+ if bias is not None:
102
+ bias = rearrange(bias, '... (g d) -> ... g d', g=num_groups)
103
+ if not is_rms_norm:
104
+ mean = x.mean(dim=-1, keepdim=True)
105
+ x = x - mean
106
+ rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps)
107
+ out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight)
108
+ out = rearrange(out, "... g d -> ... (g d)")
109
+ out = out.to(dtype)
110
+ return out if not prenorm else (out, residual)
111
+
112
+
113
+ class GroupNormRef(nn.Module):
114
+
115
+ def __init__(
116
+ self,
117
+ num_groups: int,
118
+ hidden_size: int,
119
+ elementwise_affine: bool = True,
120
+ bias: bool = False,
121
+ eps: float = 1e-5,
122
+ is_rms_norm: bool = False
123
+ ) -> GroupNormRef:
124
+ super().__init__()
125
+
126
+ if hidden_size % num_groups != 0:
127
+ raise ValueError('num_channels must be divisible by num_groups')
128
+
129
+ self.num_groups = num_groups
130
+ self.hidden_size = hidden_size
131
+ self.elementwise_affine = elementwise_affine
132
+ self.eps = eps
133
+ self.is_rms_norm = is_rms_norm
134
+
135
+ self.register_parameter("weight", None)
136
+ self.register_parameter("bias", None)
137
+ if elementwise_affine:
138
+ self.weight = nn.Parameter(torch.empty(hidden_size))
139
+ if bias:
140
+ self.bias = nn.Parameter(torch.empty(hidden_size))
141
+
142
+ self.reset_parameters()
143
+
144
+ def reset_parameters(self):
145
+ if self.elementwise_affine:
146
+ nn.init.ones_(self.weight)
147
+ if self.bias is not None:
148
+ nn.init.zeros_(self.bias)
149
+
150
+ def __repr__(self) -> str:
151
+ s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
152
+ if not self.elementwise_affine:
153
+ s += f", elementwise_affine={self.elementwise_affine}"
154
+ if self.is_rms_norm:
155
+ s += f", is_rms_norm={self.is_rms_norm}"
156
+ s += f", eps={self.eps}"
157
+ s += ")"
158
+ return s
159
+
160
+ def forward(self, x, residual=None, prenorm=False):
161
+ return group_norm_ref(
162
+ x,
163
+ self.weight,
164
+ self.bias,
165
+ num_groups=self.num_groups,
166
+ residual=residual,
167
+ eps=self.eps,
168
+ is_rms_norm=self.is_rms_norm,
169
+ prenorm=prenorm,
170
+ upcast=True
171
+ )
172
+
173
+
174
+ @triton.autotune(
175
+ configs=[
176
+ triton.Config({}, num_warps=num_warps, num_stages=num_stages)
177
+ for num_warps in [1, 2, 4, 8, 16, 32]
178
+ for num_stages in [2, 3, 4]
179
+ ],
180
+ key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS"],
181
+ )
182
+ @triton.jit
183
+ def layer_norm_fwd_kernel(
184
+ X, # pointer to the input
185
+ Y, # pointer to the output
186
+ W, # pointer to the weights
187
+ B, # pointer to the biases
188
+ RESIDUAL, # pointer to the residual
189
+ RESIDUAL_OUT, # pointer to the residual
190
+ Mean, # pointer to the mean
191
+ Rstd, # pointer to the 1/std
192
+ N, # number of columns in X
193
+ G, # number of groups
194
+ eps, # epsilon to avoid division by zero
195
+ IS_RMS_NORM: tl.constexpr,
196
+ BLOCK_N: tl.constexpr,
197
+ HAS_RESIDUAL: tl.constexpr,
198
+ STORE_RESIDUAL_OUT: tl.constexpr,
199
+ HAS_WEIGHT: tl.constexpr,
200
+ HAS_BIAS: tl.constexpr
201
+ ):
202
+ # Map the program id to the row of X and Y it should compute.
203
+ row = tl.program_id(0)
204
+ group = row % G
205
+ X += row * N
206
+ Y += row * N
207
+ if HAS_RESIDUAL:
208
+ RESIDUAL += row * N
209
+ if STORE_RESIDUAL_OUT:
210
+ RESIDUAL_OUT += row * N
211
+ # Compute mean and variance
212
+ cols = tl.arange(0, BLOCK_N)
213
+ x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
214
+ if HAS_RESIDUAL:
215
+ residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32)
216
+ x += residual
217
+ if STORE_RESIDUAL_OUT:
218
+ tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
219
+ if not IS_RMS_NORM:
220
+ mean = tl.sum(x, axis=0) / N
221
+ tl.store(Mean + row, mean)
222
+ xbar = tl.where(cols < N, x - mean, 0.0)
223
+ var = tl.sum(xbar * xbar, axis=0) / N
224
+ else:
225
+ xbar = tl.where(cols < N, x, 0.0)
226
+ var = tl.sum(xbar * xbar, axis=0) / N
227
+ rstd = 1 / tl.sqrt(var + eps)
228
+ tl.store(Rstd + row, rstd)
229
+ # Normalize and apply linear transformation
230
+ mask = cols < N
231
+ if HAS_WEIGHT:
232
+ w = tl.load(W + group * N + cols, mask=mask).to(tl.float32)
233
+ if HAS_BIAS:
234
+ b = tl.load(B + group * N + cols, mask=mask).to(tl.float32)
235
+ x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
236
+
237
+ y = tl.fma(x_hat, w, b) if HAS_WEIGHT and HAS_BIAS else \
238
+ x_hat * w if HAS_WEIGHT else \
239
+ x_hat + b if HAS_BIAS else x_hat
240
+ # Write output
241
+ y = tl.cast(y, dtype=Y.dtype.element_ty, fp_downcast_rounding="rtne")
242
+ tl.store(Y + cols, y, mask=mask)
243
+
244
+
245
+ def layer_norm_fwd(
246
+ x: torch.Tensor,
247
+ weight: torch.Tensor,
248
+ bias: torch.Tensor,
249
+ eps: float,
250
+ residual: torch.Tensor = None,
251
+ out_dtype: torch.dtype = None,
252
+ residual_dtype: torch.dtype = None,
253
+ is_rms_norm: bool = False,
254
+ num_groups: int = 1
255
+ ):
256
+ if residual is not None:
257
+ residual_dtype = residual.dtype
258
+ M, N, G = *x.shape, num_groups
259
+ if residual is not None:
260
+ assert residual.shape == (M, N)
261
+ if weight is not None:
262
+ assert weight.shape == (G * N,)
263
+ if bias is not None:
264
+ assert bias.shape == (G * N,)
265
+ # allocate output
266
+ y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype)
267
+ if residual is not None or (residual_dtype is not None and residual_dtype != x.dtype):
268
+ residual_out = torch.empty(M, N, device=x.device, dtype=residual_dtype)
269
+ else:
270
+ residual_out = None
271
+ mean = torch.empty((M,), dtype=torch.float32, device=x.device) if not is_rms_norm else None
272
+ rstd = torch.empty((M,), dtype=torch.float32, device=x.device)
273
+ # Less than 64KB per feature: enqueue fused kernel
274
+ MAX_FUSED_SIZE = 65536 // x.element_size()
275
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
276
+ if N > BLOCK_N:
277
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
278
+ # heuristics for number of warps
279
+ layer_norm_fwd_kernel[(M,)](
280
+ x,
281
+ y,
282
+ weight,
283
+ bias,
284
+ residual,
285
+ residual_out,
286
+ mean,
287
+ rstd,
288
+ N,
289
+ G,
290
+ eps,
291
+ is_rms_norm,
292
+ BLOCK_N,
293
+ residual is not None,
294
+ residual_out is not None,
295
+ weight is not None,
296
+ bias is not None,
297
+ )
298
+ # residual_out is None if residual is None and residual_dtype == input_dtype
299
+ return y, mean, rstd, residual_out if residual_out is not None else x
300
+
301
+
302
+ @triton.heuristics({
303
+ "RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None
304
+ })
305
+ @triton.autotune(
306
+ configs=[
307
+ triton.Config({}, num_warps=num_warps, num_stages=num_stages)
308
+ for num_warps in [1, 2, 4, 8, 16, 32]
309
+ for num_stages in [2, 3, 4]
310
+ ],
311
+ key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS"],
312
+ )
313
+ @triton.jit
314
+ def layer_norm_bwd_kernel(
315
+ X, # pointer to the input
316
+ W, # pointer to the weights
317
+ B, # pointer to the biases
318
+ Y, # pointer to the output to be recomputed
319
+ DY, # pointer to the output gradient
320
+ DX, # pointer to the input gradient
321
+ DW, # pointer to the partial sum of weights gradient
322
+ DB, # pointer to the partial sum of biases gradient
323
+ DRESIDUAL,
324
+ DRESIDUAL_IN,
325
+ Mean, # pointer to the mean
326
+ Rstd, # pointer to the 1/std
327
+ M, # number of rows in X
328
+ N, # number of columns in X
329
+ G, # number of groups
330
+ rows_per_program,
331
+ programs_per_group,
332
+ IS_RMS_NORM: tl.constexpr,
333
+ BLOCK_N: tl.constexpr,
334
+ HAS_DRESIDUAL: tl.constexpr,
335
+ STORE_DRESIDUAL: tl.constexpr,
336
+ HAS_WEIGHT: tl.constexpr,
337
+ HAS_BIAS: tl.constexpr,
338
+ RECOMPUTE_OUTPUT: tl.constexpr,
339
+ ):
340
+ row_block_id = tl.program_id(0)
341
+ group_id, program_id_in_group = row_block_id // programs_per_group, row_block_id % programs_per_group
342
+
343
+ row_start = group_id + program_id_in_group * G * rows_per_program
344
+ row_end = min(row_start + G * rows_per_program, M)
345
+
346
+ cols = tl.arange(0, BLOCK_N)
347
+ mask = cols < N
348
+
349
+ if HAS_WEIGHT:
350
+ w = tl.load(W + group_id * N + cols, mask=mask).to(tl.float32)
351
+ dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
352
+ if RECOMPUTE_OUTPUT and HAS_BIAS:
353
+ b = tl.load(B + group_id * N + cols, mask=mask, other=0.0).to(tl.float32)
354
+ if HAS_BIAS:
355
+ db = tl.zeros((BLOCK_N,), dtype=tl.float32)
356
+
357
+ for row in range(row_start, row_end, G):
358
+ # Load data to SRAM
359
+ x = tl.load(X + row * N + cols, mask=mask, other=0).to(tl.float32)
360
+ dy = tl.load(DY + row * N + cols, mask=mask, other=0).to(tl.float32)
361
+ if not IS_RMS_NORM:
362
+ mean = tl.load(Mean + row)
363
+ rstd = tl.load(Rstd + row)
364
+ # Compute dx
365
+ xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
366
+ xhat = tl.where(mask, xhat, 0.0)
367
+ if RECOMPUTE_OUTPUT:
368
+ y = xhat * w if HAS_WEIGHT else xhat
369
+ if HAS_BIAS:
370
+ y = y + b
371
+ tl.store(Y + row * N + cols, y, mask=mask)
372
+ wdy = dy
373
+ if HAS_WEIGHT:
374
+ wdy = dy * w
375
+ dw += dy * xhat
376
+ if HAS_BIAS:
377
+ db += dy
378
+ if not IS_RMS_NORM:
379
+ c1 = tl.sum(xhat * wdy, axis=0) / N
380
+ c2 = tl.sum(wdy, axis=0) / N
381
+ dx = (wdy - (xhat * c1 + c2)) * rstd
382
+ else:
383
+ c1 = tl.sum(xhat * wdy, axis=0) / N
384
+ dx = (wdy - xhat * c1) * rstd
385
+ if HAS_DRESIDUAL:
386
+ dres = tl.load(DRESIDUAL + row * N + cols, mask=mask, other=0).to(tl.float32)
387
+ dx += dres
388
+ # Write dx
389
+ dx = tl.cast(dx, dtype=DX.dtype.element_ty, fp_downcast_rounding="rtne")
390
+ if STORE_DRESIDUAL:
391
+ tl.store(DRESIDUAL_IN + row * N + cols, dx, mask=mask)
392
+ tl.store(DX + row * N + cols, dx, mask=mask)
393
+
394
+ if HAS_WEIGHT:
395
+ tl.store(DW + row_block_id * N + cols, dw, mask=mask)
396
+ if HAS_BIAS:
397
+ tl.store(DB + row_block_id * N + cols, db, mask=mask)
398
+
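Read off the loop above: with xhat = (x - mean) * rstd (or x * rstd for RMSNorm) and wdy = w * dy, each row's input gradient is dx = (wdy - xhat * c1 - c2) * rstd, where c1 = sum(xhat * wdy) / N and c2 = sum(wdy) / N; the RMSNorm branch drops mean and c2. The per-program dw/db accumulators are only partial sums over the rows handled by one program; the host-side wrapper below reduces them across programs.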
399
+
400
+ def layer_norm_bwd(
401
+ dy: torch.Tensor,
402
+ x: torch.Tensor,
403
+ weight: torch.Tensor,
404
+ bias: torch.Tensor,
405
+ eps: float,
406
+ mean: torch.Tensor,
407
+ rstd: torch.Tensor,
408
+ dresidual: torch.Tensor = None,
409
+ has_residual: bool = False,
410
+ is_rms_norm: bool = False,
411
+ x_dtype: torch.dtype = None,
412
+ recompute_output: bool = False,
413
+ num_groups: int = 1
414
+ ):
415
+ M, N, G = *x.shape, num_groups
416
+ assert dy.shape == (M, N)
417
+ if dresidual is not None:
418
+ assert dresidual.shape == (M, N)
419
+ if weight is not None:
420
+ assert weight.shape == (G * N,)
421
+ if bias is not None:
422
+ assert bias.shape == (G * N,)
423
+ # allocate output
424
+ dx = torch.empty_like(x) if x_dtype is None else torch.empty(M, N, dtype=x_dtype, device=x.device)
425
+ dresidual_in = torch.empty_like(x) if has_residual and dx.dtype != x.dtype else None
426
+ y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None
427
+
428
+ # Less than 64KB per feature: enqueue fused kernel
429
+ MAX_FUSED_SIZE = 65536 // x.element_size()
430
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
431
+ if N > BLOCK_N:
432
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
433
+ # each program handles one group only
434
+ S = triton.cdiv(get_multiprocessor_count(x.device.index), G) * G
435
+ dw = torch.empty((S, N), dtype=torch.float32, device=weight.device) if weight is not None else None
436
+ db = torch.empty((S, N), dtype=torch.float32, device=bias.device) if bias is not None else None
437
+ rows_per_program = triton.cdiv(M, S)
438
+ programs_per_group = S // G
439
+ grid = (S,)
440
+ layer_norm_bwd_kernel[grid](
441
+ x,
442
+ weight,
443
+ bias,
444
+ y,
445
+ dy,
446
+ dx,
447
+ dw,
448
+ db,
449
+ dresidual,
450
+ dresidual_in,
451
+ mean,
452
+ rstd,
453
+ M,
454
+ N,
455
+ G,
456
+ rows_per_program,
457
+ programs_per_group,
458
+ is_rms_norm,
459
+ BLOCK_N,
460
+ dresidual is not None,
461
+ dresidual_in is not None,
462
+ weight is not None,
463
+ bias is not None,
464
+ )
465
+ dw = dw.view(G, -1, N).sum(1).to(weight).view_as(weight) if weight is not None else None
466
+ db = db.view(G, -1, N).sum(1).to(bias).view_as(bias) if bias is not None else None
467
+ # Don't need to compute dresidual_in separately in this case
468
+ if has_residual and dx.dtype == x.dtype:
469
+ dresidual_in = dx
470
+ return (dx, dw, db, dresidual_in) if not recompute_output else (dx, dw, db, dresidual_in, y)
471
+
472
+
473
+ class LayerNormFunction(torch.autograd.Function):
474
+
475
+ @staticmethod
476
+ @input_guard
477
+ def forward(
478
+ ctx,
479
+ x,
480
+ weight,
481
+ bias,
482
+ residual=None,
483
+ eps=1e-5,
484
+ prenorm=False,
485
+ residual_in_fp32=False,
486
+ is_rms_norm=False,
487
+ num_groups=1
488
+ ):
489
+ x_shape_og = x.shape
490
+
491
+ if x.shape[-1] % num_groups != 0:
492
+ raise ValueError('num_channels must be divisible by num_groups')
493
+ # reshape input data into 2D tensor
494
+ x = x.reshape(-1, (x.shape[-1] // num_groups))
495
+ if residual is not None:
496
+ assert residual.shape == x_shape_og
497
+ residual = residual.reshape_as(x)
498
+ residual_dtype = (
499
+ residual.dtype
500
+ if residual is not None
501
+ else (torch.float32 if residual_in_fp32 else None)
502
+ )
503
+ y, mean, rstd, residual_out = layer_norm_fwd(
504
+ x,
505
+ weight,
506
+ bias,
507
+ eps,
508
+ residual,
509
+ residual_dtype=residual_dtype,
510
+ is_rms_norm=is_rms_norm,
511
+ num_groups=num_groups
512
+ )
513
+ ctx.save_for_backward(residual_out, weight, bias, mean, rstd)
514
+ ctx.x_shape_og = x_shape_og
515
+ ctx.eps = eps
516
+ ctx.is_rms_norm = is_rms_norm
517
+ ctx.num_groups = num_groups
518
+ ctx.has_residual = residual is not None
519
+ ctx.prenorm = prenorm
520
+ ctx.x_dtype = x.dtype
521
+ y = y.reshape(x_shape_og)
522
+ return y if not prenorm else (y, residual_out.reshape(x_shape_og))
523
+
524
+ @staticmethod
525
+ @input_guard
526
+ def backward(ctx, dy, *args):
527
+ x, weight, bias, mean, rstd = ctx.saved_tensors
528
+ dy = dy.reshape(-1, (dy.shape[-1] // ctx.num_groups))
529
+ assert dy.shape == x.shape
530
+ if ctx.prenorm:
531
+ dresidual = args[0]
532
+ dresidual = dresidual.reshape(-1, x.shape[-1])
533
+ assert dresidual.shape == x.shape
534
+ else:
535
+ dresidual = None
536
+ dx, dw, db, dresidual_in = layer_norm_bwd(
537
+ dy,
538
+ x,
539
+ weight,
540
+ bias,
541
+ ctx.eps,
542
+ mean,
543
+ rstd,
544
+ dresidual,
545
+ ctx.has_residual,
546
+ ctx.is_rms_norm,
547
+ x_dtype=ctx.x_dtype,
548
+ num_groups=ctx.num_groups
549
+ )
550
+ return (
551
+ dx.reshape(ctx.x_shape_og),
552
+ dw,
553
+ db,
554
+ dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
555
+ None,
556
+ None,
557
+ None,
558
+ None,
559
+ None
560
+ )
561
+
562
+
563
+ def layer_norm(
564
+ x: torch.Tensor,
565
+ weight: torch.Tensor,
566
+ bias: torch.Tensor,
567
+ residual: torch.Tensor = None,
568
+ eps: float = 1e-5,
569
+ prenorm: bool = False,
570
+ residual_in_fp32: bool = False,
571
+ is_rms_norm: bool = False
572
+ ):
573
+ return LayerNormFunction.apply(
574
+ x,
575
+ weight,
576
+ bias,
577
+ residual,
578
+ eps,
579
+ prenorm,
580
+ residual_in_fp32,
581
+ is_rms_norm
582
+ )
583
+
584
+
585
+ def group_norm(
586
+ x: torch.Tensor,
587
+ weight: torch.Tensor,
588
+ bias: torch.Tensor,
589
+ residual: torch.Tensor = None,
590
+ eps: float = 1e-5,
591
+ prenorm: bool = False,
592
+ residual_in_fp32: bool = False,
593
+ is_rms_norm: bool = False,
594
+ num_groups: int = 1
595
+ ):
596
+ return LayerNormFunction.apply(
597
+ x,
598
+ weight,
599
+ bias,
600
+ residual,
601
+ eps,
602
+ prenorm,
603
+ residual_in_fp32,
604
+ is_rms_norm,
605
+ num_groups
606
+ )
607
+
608
+
609
+ def rms_norm(
610
+ x: torch.Tensor,
611
+ weight: torch.Tensor,
612
+ bias: torch.Tensor,
613
+ residual: torch.Tensor = None,
614
+ eps: float = 1e-5,
615
+ prenorm: bool = False,
616
+ residual_in_fp32: bool = False
617
+ ):
618
+ return LayerNormFunction.apply(
619
+ x,
620
+ weight,
621
+ bias,
622
+ residual,
623
+ eps,
624
+ prenorm,
625
+ residual_in_fp32,
626
+ True
627
+ )
628
+
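A hedged sketch of the functional API above with the fused residual path (sizes, device and dtype are assumptions): when `prenorm=True`, the call returns both the normalized output and the updated residual stream.

h   = torch.randn(2, 512, 1024, device='cuda', dtype=torch.bfloat16)
res = torch.randn_like(h)
w   = torch.ones(1024, device='cuda', dtype=torch.bfloat16)
# y = RMSNorm(h + res) * w, new_res = h + res (the pre-norm residual stream)
y, new_res = rms_norm(h, w, None, residual=res, prenorm=True)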
629
+
630
+ def layer_norm_linear(
631
+ x: torch.Tensor,
632
+ norm_weight: torch.Tensor,
633
+ norm_bias: torch.Tensor,
634
+ linear_weight: torch.Tensor,
635
+ linear_bias: torch.Tensor,
636
+ residual: torch.Tensor = None,
637
+ eps: float = 1e-5,
638
+ prenorm: bool = False,
639
+ residual_in_fp32: bool = False,
640
+ is_rms_norm: bool = False,
641
+ num_groups: int = 1
642
+ ):
643
+ return LayerNormLinearFunction.apply(
644
+ x,
645
+ norm_weight,
646
+ norm_bias,
647
+ linear_weight,
648
+ linear_bias,
649
+ residual,
650
+ eps,
651
+ prenorm,
652
+ residual_in_fp32,
653
+ is_rms_norm,
654
+ num_groups
655
+ )
656
+
657
+
658
+ def rms_norm_linear(
659
+ x: torch.Tensor,
660
+ norm_weight: torch.Tensor,
661
+ norm_bias: torch.Tensor,
662
+ linear_weight: torch.Tensor,
663
+ linear_bias: torch.Tensor,
664
+ residual: torch.Tensor = None,
665
+ eps: float = 1e-5,
666
+ prenorm: bool = False,
667
+ residual_in_fp32: bool = False
668
+ ):
669
+ return layer_norm_linear(
670
+ x=x,
671
+ norm_weight=norm_weight,
672
+ norm_bias=norm_bias,
673
+ linear_weight=linear_weight,
674
+ linear_bias=linear_bias,
675
+ residual=residual,
676
+ eps=eps,
677
+ prenorm=prenorm,
678
+ residual_in_fp32=residual_in_fp32,
679
+ is_rms_norm=True
680
+ )
681
+
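A sketch of the fused norm-then-projection path above, with hypothetical shapes; as noted in the autograd function further down, the normalized activations are recomputed in the backward pass rather than stored.

x      = torch.randn(2, 512, 1024, device='cuda', dtype=torch.bfloat16)
norm_w = torch.ones(1024, device='cuda', dtype=torch.bfloat16)
proj_w = torch.randn(4096, 1024, device='cuda', dtype=torch.bfloat16) * 0.02
o = rms_norm_linear(x, norm_w, None, proj_w, None)  # -> [2, 512, 4096]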
682
+
683
+ def group_norm_linear(
684
+ x: torch.Tensor,
685
+ norm_weight: torch.Tensor,
686
+ norm_bias: torch.Tensor,
687
+ linear_weight: torch.Tensor,
688
+ linear_bias: torch.Tensor,
689
+ residual: torch.Tensor = None,
690
+ eps: float = 1e-5,
691
+ prenorm: bool = False,
692
+ residual_in_fp32: bool = False,
693
+ is_rms_norm: bool = False,
694
+ num_groups: int = 1
695
+ ):
696
+ return layer_norm_linear(
697
+ x=x,
698
+ norm_weight=norm_weight,
699
+ norm_bias=norm_bias,
700
+ linear_weight=linear_weight,
701
+ linear_bias=linear_bias,
702
+ residual=residual,
703
+ eps=eps,
704
+ prenorm=prenorm,
705
+ residual_in_fp32=residual_in_fp32,
706
+ is_rms_norm=is_rms_norm,
707
+ num_groups=num_groups
708
+ )
709
+
710
+
711
+ class LayerNorm(nn.Module):
712
+
713
+ def __init__(
714
+ self,
715
+ hidden_size: int,
716
+ elementwise_affine: bool = True,
717
+ bias: bool = False,
718
+ eps: float = 1e-5
719
+ ) -> LayerNorm:
720
+ super().__init__()
721
+
722
+ self.hidden_size = hidden_size
723
+ self.elementwise_affine = elementwise_affine
724
+ self.eps = eps
725
+
726
+ self.register_parameter("weight", None)
727
+ self.register_parameter("bias", None)
728
+ if elementwise_affine:
729
+ self.weight = nn.Parameter(torch.empty(hidden_size))
730
+ if bias:
731
+ self.bias = nn.Parameter(torch.empty(hidden_size))
732
+
733
+ self.reset_parameters()
734
+
735
+ def reset_parameters(self):
736
+ if self.elementwise_affine:
737
+ nn.init.ones_(self.weight)
738
+ if self.bias is not None:
739
+ nn.init.zeros_(self.bias)
740
+
741
+ def __repr__(self) -> str:
742
+ s = f"{self.__class__.__name__}({self.hidden_size}"
743
+ if not self.elementwise_affine:
744
+ s += f", elementwise_affine={self.elementwise_affine}"
745
+ s += f", eps={self.eps}"
746
+ s += ")"
747
+ return s
748
+
749
+ def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
750
+ return layer_norm(
751
+ x,
752
+ self.weight,
753
+ self.bias,
754
+ residual=residual,
755
+ eps=self.eps,
756
+ prenorm=prenorm,
757
+ residual_in_fp32=residual_in_fp32
758
+ )
759
+
760
+
761
+ class GroupNorm(nn.Module):
762
+
763
+ def __init__(
764
+ self,
765
+ num_groups: int,
766
+ hidden_size: int,
767
+ elementwise_affine: bool = True,
768
+ bias: bool = False,
769
+ eps: float = 1e-5,
770
+ is_rms_norm: bool = False
771
+ ) -> GroupNorm:
772
+ super().__init__()
773
+
774
+ if hidden_size % num_groups != 0:
775
+ raise ValueError('num_channels must be divisible by num_groups')
776
+
777
+ self.num_groups = num_groups
778
+ self.hidden_size = hidden_size
779
+ self.elementwise_affine = elementwise_affine
780
+ self.eps = eps
781
+ self.is_rms_norm = is_rms_norm
782
+
783
+ self.register_parameter("weight", None)
784
+ self.register_parameter("bias", None)
785
+ if elementwise_affine:
786
+ self.weight = nn.Parameter(torch.empty(hidden_size))
787
+ if bias:
788
+ self.bias = nn.Parameter(torch.empty(hidden_size))
789
+
790
+ self.reset_parameters()
791
+
792
+ def reset_parameters(self):
793
+ if self.elementwise_affine:
794
+ nn.init.ones_(self.weight)
795
+ if self.bias is not None:
796
+ nn.init.zeros_(self.bias)
797
+
798
+ def __repr__(self) -> str:
799
+ s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
800
+ if not self.elementwise_affine:
801
+ s += f", elementwise_affine={self.elementwise_affine}"
802
+ if self.is_rms_norm:
803
+ s += f", is_rms_norm={self.is_rms_norm}"
804
+ s += f", eps={self.eps}"
805
+ s += ")"
806
+ return s
807
+
808
+ def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
809
+ return group_norm(
810
+ x,
811
+ self.weight,
812
+ self.bias,
813
+ residual=residual,
814
+ eps=self.eps,
815
+ prenorm=prenorm,
816
+ residual_in_fp32=residual_in_fp32,
817
+ is_rms_norm=self.is_rms_norm,
818
+ num_groups=self.num_groups
819
+ )
820
+
821
+
822
+ class RMSNorm(nn.Module):
823
+
824
+ def __init__(
825
+ self,
826
+ hidden_size: int,
827
+ elementwise_affine: bool = True,
828
+ bias: bool = False,
829
+ eps: float = 1e-5
830
+ ) -> RMSNorm:
831
+ super().__init__()
832
+
833
+ self.hidden_size = hidden_size
834
+ self.elementwise_affine = elementwise_affine
835
+ self.eps = eps
836
+
837
+ self.register_parameter("weight", None)
838
+ self.register_parameter("bias", None)
839
+ if elementwise_affine:
840
+ self.weight = nn.Parameter(torch.empty(hidden_size))
841
+ if bias:
842
+ self.bias = nn.Parameter(torch.empty(hidden_size))
843
+
844
+ self.reset_parameters()
845
+
846
+ def reset_parameters(self):
847
+ if self.elementwise_affine:
848
+ nn.init.ones_(self.weight)
849
+ if self.bias is not None:
850
+ nn.init.zeros_(self.bias)
851
+
852
+ def __repr__(self) -> str:
853
+ s = f"{self.__class__.__name__}({self.hidden_size}"
854
+ if not self.elementwise_affine:
855
+ s += f", elementwise_affine={self.elementwise_affine}"
856
+ s += f", eps={self.eps}"
857
+ s += ")"
858
+ return s
859
+
860
+ def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
861
+ return rms_norm(
862
+ x,
863
+ self.weight,
864
+ self.bias,
865
+ residual=residual,
866
+ eps=self.eps,
867
+ prenorm=prenorm,
868
+ residual_in_fp32=residual_in_fp32,
869
+ )
870
+
871
+
872
+ class LayerNormLinearFunction(torch.autograd.Function):
873
+
874
+ @staticmethod
875
+ @input_guard
876
+ def forward(
877
+ ctx,
878
+ x,
879
+ norm_weight,
880
+ norm_bias,
881
+ linear_weight,
882
+ linear_bias,
883
+ residual=None,
884
+ eps=1e-5,
885
+ prenorm=False,
886
+ residual_in_fp32=False,
887
+ is_rms_norm=False,
888
+ num_groups=1
889
+ ):
890
+ x_shape_og = x.shape
891
+
892
+ if x.shape[-1] % num_groups != 0:
893
+ raise ValueError('num_channels must be divisible by num_groups')
894
+ # reshape input data into 2D tensor
895
+ x = x.reshape(-1, (x.shape[-1] // num_groups))
896
+ if residual is not None:
897
+ assert residual.shape == x_shape_og
898
+ residual = residual.reshape_as(x)
899
+ residual_dtype = (
900
+ residual.dtype
901
+ if residual is not None
902
+ else (torch.float32 if residual_in_fp32 else None)
903
+ )
904
+ y, mean, rstd, residual_out = layer_norm_fwd(
905
+ x,
906
+ norm_weight,
907
+ norm_bias,
908
+ eps,
909
+ residual,
910
+ out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(),
911
+ residual_dtype=residual_dtype,
912
+ is_rms_norm=is_rms_norm,
913
+ num_groups=num_groups
914
+ )
915
+ y = y.reshape(x_shape_og)
916
+ dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype
917
+ linear_weight = linear_weight.to(dtype)
918
+ linear_bias = linear_bias.to(dtype) if linear_bias is not None else None
919
+ out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias)
920
+ # We don't store y; it will be recomputed in the backward pass to save memory
921
+ ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd)
922
+ ctx.x_shape_og = x_shape_og
923
+ ctx.eps = eps
924
+ ctx.is_rms_norm = is_rms_norm
925
+ ctx.num_groups = num_groups
926
+ ctx.has_residual = residual is not None
927
+ ctx.prenorm = prenorm
928
+ ctx.x_dtype = x.dtype
929
+ ctx.linear_bias_is_none = linear_bias is None
930
+ return out if not prenorm else (out, residual_out.reshape(x_shape_og))
931
+
932
+ @staticmethod
933
+ @input_guard
934
+ def backward(ctx, dout, *args):
935
+ x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors
936
+ dout = dout.reshape(-1, dout.shape[-1])
937
+ dy = F.linear(dout, linear_weight.t())
938
+ dy = dy.reshape(-1, (dy.shape[-1] // ctx.num_groups))
939
+ dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
940
+ assert dy.shape == x.shape
941
+ if ctx.prenorm:
942
+ dresidual = args[0]
943
+ dresidual = dresidual.reshape(-1, x.shape[-1])
944
+ assert dresidual.shape == x.shape
945
+ else:
946
+ dresidual = None
947
+ dx, dnorm_weight, dnorm_bias, dresidual_in, y = layer_norm_bwd(
948
+ dy,
949
+ x,
950
+ norm_weight,
951
+ norm_bias,
952
+ ctx.eps,
953
+ mean,
954
+ rstd,
955
+ dresidual,
956
+ ctx.has_residual,
957
+ ctx.is_rms_norm,
958
+ x_dtype=ctx.x_dtype,
959
+ recompute_output=True,
960
+ num_groups=ctx.num_groups
961
+ )
962
+ dlinear_weight = torch.einsum("bo,bi->oi", dout, y.view(-1, linear_weight.shape[-1]))
963
+ return (
964
+ dx.reshape(ctx.x_shape_og),
965
+ dnorm_weight,
966
+ dnorm_bias,
967
+ dlinear_weight,
968
+ dlinear_bias,
969
+ dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
970
+ None,
971
+ None,
972
+ None,
973
+ None,
974
+ None
975
+ )
976
+
977
+
978
+ class LayerNormLinear(nn.Module):
979
+
980
+ def __init__(
981
+ self,
982
+ hidden_size,
983
+ elementwise_affine: bool = True,
984
+ bias: bool = False,
985
+ eps: float = 1e-5
986
+ ) -> LayerNormLinear:
987
+ super().__init__()
988
+
989
+ self.hidden_size = hidden_size
990
+ self.elementwise_affine = elementwise_affine
991
+ self.eps = eps
992
+
993
+ self.register_parameter("weight", None)
994
+ self.register_parameter("bias", None)
995
+ if elementwise_affine:
996
+ self.weight = nn.Parameter(torch.empty(hidden_size))
997
+ if bias:
998
+ self.bias = nn.Parameter(torch.empty(hidden_size))
999
+
1000
+ self.reset_parameters()
1001
+
1002
+ def reset_parameters(self):
1003
+ if self.elementwise_affine:
1004
+ nn.init.ones_(self.weight)
1005
+ if self.bias is not None:
1006
+ nn.init.zeros_(self.bias)
1007
+
1008
+ def __repr__(self) -> str:
1009
+ s = f"{self.__class__.__name__}({self.hidden_size}"
1010
+ if not self.elementwise_affine:
1011
+ s += f", elementwise_affine={self.elementwise_affine}"
1012
+ s += f", eps={self.eps}"
1013
+ s += ")"
1014
+ return s
1015
+
1016
+ def forward(self, x, weight, bias, residual=None, prenorm=False, residual_in_fp32=False):
1017
+ return layer_norm_linear(
1018
+ x=x,
1019
+ norm_weight=self.weight,
1020
+ norm_bias=self.bias,
1021
+ linear_weight=weight,
1022
+ linear_bias=bias,
1023
+ residual=residual,
1024
+ eps=self.eps,
1025
+ prenorm=prenorm,
1026
+ residual_in_fp32=residual_in_fp32,
1027
+ is_rms_norm=False
1028
+ )
1029
+
1030
+
1031
+ class GroupNormLinear(nn.Module):
1032
+
1033
+ def __init__(
1034
+ self,
1035
+ num_groups: int,
1036
+ hidden_size: int,
1037
+ elementwise_affine: bool = True,
1038
+ bias: bool = False,
1039
+ eps: float = 1e-5,
1040
+ is_rms_norm: bool = False
1041
+ ) -> GroupNormLinear:
1042
+ super().__init__()
1043
+
1044
+ if hidden_size % num_groups != 0:
1045
+ raise ValueError('num_channels must be divisible by num_groups')
1046
+
1047
+ self.num_groups = num_groups
1048
+ self.hidden_size = hidden_size
1049
+ self.elementwise_affine = elementwise_affine
1050
+ self.eps = eps
1051
+ self.is_rms_norm = is_rms_norm
1052
+
1053
+ self.register_parameter("weight", None)
1054
+ self.register_parameter("bias", None)
1055
+ if elementwise_affine:
1056
+ self.weight = nn.Parameter(torch.empty(hidden_size))
1057
+ if bias:
1058
+ self.bias = nn.Parameter(torch.empty(hidden_size))
1059
+
1060
+ self.reset_parameters()
1061
+
1062
+ def reset_parameters(self):
1063
+ if self.elementwise_affine:
1064
+ nn.init.ones_(self.weight)
1065
+ if self.bias is not None:
1066
+ nn.init.zeros_(self.bias)
1067
+
1068
+ def __repr__(self) -> str:
1069
+ s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
1070
+ if not self.elementwise_affine:
1071
+ s += f", elementwise_affine={self.elementwise_affine}"
1072
+ if self.is_rms_norm:
1073
+ s += f", is_rms_norm={self.is_rms_norm}"
1074
+ s += f", eps={self.eps}"
1075
+ s += ")"
1076
+ return s
1077
+
1078
+ def forward(self, x, weight, bias, residual=None, prenorm=False, residual_in_fp32=False):
1079
+ return layer_norm_linear(
1080
+ x=x,
1081
+ norm_weight=self.weight,
1082
+ norm_bias=self.bias,
1083
+ linear_weight=weight,
1084
+ linear_bias=bias,
1085
+ residual=residual,
1086
+ eps=self.eps,
1087
+ prenorm=prenorm,
1088
+ residual_in_fp32=residual_in_fp32,
1089
+ is_rms_norm=self.is_rms_norm,
1090
+ num_groups=self.num_groups
1091
+ )
1092
+
1093
+
1094
+ class RMSNormLinear(nn.Module):
1095
+
1096
+ def __init__(
1097
+ self,
1098
+ hidden_size,
1099
+ elementwise_affine: bool = True,
1100
+ bias: bool = False,
1101
+ eps: float = 1e-5
1102
+ ) -> RMSNormLinear:
1103
+ super().__init__()
1104
+
1105
+ self.hidden_size = hidden_size
1106
+ self.elementwise_affine = elementwise_affine
1107
+ self.eps = eps
1108
+
1109
+ self.register_parameter("weight", None)
1110
+ self.register_parameter("bias", None)
1111
+ if elementwise_affine:
1112
+ self.weight = nn.Parameter(torch.empty(hidden_size))
1113
+ if bias:
1114
+ self.bias = nn.Parameter(torch.empty(hidden_size))
1115
+
1116
+ self.reset_parameters()
1117
+
1118
+ def reset_parameters(self):
1119
+ if self.elementwise_affine:
1120
+ nn.init.ones_(self.weight)
1121
+ if self.bias is not None:
1122
+ nn.init.zeros_(self.bias)
1123
+
1124
+ def __repr__(self) -> str:
1125
+ s = f"{self.__class__.__name__}({self.hidden_size}"
1126
+ if not self.elementwise_affine:
1127
+ s += f", elementwise_affine={self.elementwise_affine}"
1128
+ s += f", eps={self.eps}"
1129
+ s += ")"
1130
+ return s
1131
+
1132
+ def forward(self, x, weight, bias, residual=None, prenorm=False, residual_in_fp32=False):
1133
+ return layer_norm_linear(
1134
+ x=x,
1135
+ norm_weight=self.weight,
1136
+ norm_bias=self.bias,
1137
+ linear_weight=weight,
1138
+ linear_bias=bias,
1139
+ residual=residual,
1140
+ eps=self.eps,
1141
+ prenorm=prenorm,
1142
+ residual_in_fp32=residual_in_fp32,
1143
+ is_rms_norm=True
1144
+ )
1145
+
1146
+
1147
+ class NormParallel(ParallelStyle):
1148
+
1149
+ def __init__(self, *, sequence_dim: int = 1, use_local_output: bool = False):
1150
+ super().__init__()
1151
+ self.sequence_sharding = (Shard(sequence_dim),)
1152
+ self.use_local_output = use_local_output
1153
+
1154
+ def _replicate_module_fn(
1155
+ self, name: str, module: nn.Module, device_mesh: DeviceMesh
1156
+ ):
1157
+ for p_name, param in module.named_parameters():
1158
+ # simple replication with the fixed ones_ init from LayerNorm/RMSNorm, which allows
1159
+ # us to simply use from_local
1160
+ replicated_param = torch.nn.Parameter(
1161
+ DTensor.from_local(param, device_mesh, [Replicate()], run_check=False)
1162
+ )
1163
+ module.register_parameter(p_name, replicated_param)
1164
+
1165
+ @staticmethod
1166
+ def _prepare_input_fn(sequence_sharding, mod, inputs, device_mesh):
1167
+ input_tensor = inputs[0]
1168
+ if isinstance(input_tensor, DTensor):
1169
+ # if the passed in input DTensor is not sharded on the sequence dim, we need to redistribute it
1170
+ if input_tensor.placements != sequence_sharding:
1171
+ input_tensor = input_tensor.redistribute(
1172
+ placements=sequence_sharding, async_op=True
1173
+ )
1174
+ return input_tensor
1175
+ elif isinstance(input_tensor, torch.Tensor):
1176
+ # assume the input passed in is already sharded on the sequence dim and create the DTensor
1177
+ return DTensor.from_local(
1178
+ input_tensor, device_mesh, sequence_sharding, run_check=False
1179
+ )
1180
+ else:
1181
+ raise ValueError(
1182
+ f"expecting input of {mod} to be a torch.Tensor or DTensor, but got {input_tensor}"
1183
+ )
1184
+
1185
+ @staticmethod
1186
+ def _prepare_output_fn(use_local_output, mod, outputs, device_mesh):
1187
+ return outputs.to_local() if use_local_output else outputs
1188
+
1189
+ def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
1190
+ return distribute_module(
1191
+ module,
1192
+ device_mesh,
1193
+ self._replicate_module_fn,
1194
+ partial(self._prepare_input_fn, self.sequence_sharding),
1195
+ partial(self._prepare_output_fn, self.use_local_output),
1196
+ )
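A hedged sketch of wiring `NormParallel` into a tensor/sequence-parallel plan; the device mesh setup and the submodule name `attn_norm` are assumptions for illustration only.

from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import parallelize_module

tp_mesh = init_device_mesh("cuda", (8,))  # assumed 8-way mesh
block = parallelize_module(
    block,                                # a module owning an `attn_norm` child (assumed name)
    tp_mesh,
    {"attn_norm": NormParallel(sequence_dim=1)},
)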
fla/modules/layernorm_gated.py ADDED
@@ -0,0 +1,528 @@
1
+ # Copyright (c) 2024, Tri Dao.
2
+ # Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
3
+ # For the backward pass, we keep weight_grad and bias_grad in registers and accumulate.
4
+ # This backward pass is faster for dimensions up to 8k, but after that it's much slower due to register spilling.
5
+ # The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine.
6
+
7
+ import math
8
+ from typing import Optional
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+ import triton
14
+ import triton.language as tl
15
+ from einops import rearrange
16
+
17
+ from fla.utils import get_multiprocessor_count, input_guard
18
+
19
+
20
+ def rms_norm_ref(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, upcast=True):
21
+ dtype = x.dtype
22
+ weight = weight.float()
23
+ bias = bias.float() if bias is not None else None
24
+ if upcast:
25
+ x = x.float()
26
+ z = z.float() if z is not None else z
27
+ if z is not None and not norm_before_gate:
28
+ x = x * F.silu(z)
29
+ if group_size is None:
30
+ rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps)
31
+ out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight)
32
+ else:
33
+ x_group = rearrange(x, "... (g d) -> ... g d", d=group_size)
34
+ rstd = 1 / torch.sqrt((x_group.square()).mean(dim=-1, keepdim=True) + eps)
35
+ out = rearrange(x_group * rstd, "... g d -> ... (g d)") * weight
36
+ if bias is not None:
37
+ out = out + bias
38
+ if z is not None and norm_before_gate:
39
+ out *= F.silu(z)
40
+ return out.to(dtype)
41
+
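In symbols, the reference above computes silu(z) = z * sigmoid(z) and rms(x) = sqrt(mean(x**2) + eps); with `norm_before_gate=True` the output is (x / rms(x)) * weight * silu(z) (plus bias if given), otherwise the gate is applied first and the normalization acts on x * silu(z). When `group_size` is set, the same rule is applied independently to each group of `group_size` channels.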
42
+
43
+ @triton.heuristics({
44
+ "HAS_BIAS": lambda args: args["B"] is not None,
45
+ "HAS_Z": lambda args: args["Z"] is not None,
46
+ })
47
+ @triton.jit
48
+ def layer_norm_fwd_kernel(
49
+ X, # pointer to the input
50
+ Y, # pointer to the output
51
+ W, # pointer to the weights
52
+ B, # pointer to the biases
53
+ Z, # pointer to the other branch
54
+ Mean, # pointer to the mean
55
+ Rstd, # pointer to the 1/std
56
+ stride_x_row, # how much to increase the pointer when moving by 1 row
57
+ stride_y_row,
58
+ stride_z_row,
59
+ M, # number of rows in X
60
+ N, # number of columns in X
61
+ eps, # epsilon to avoid division by zero
62
+ BLOCK_N: tl.constexpr,
63
+ HAS_BIAS: tl.constexpr,
64
+ HAS_Z: tl.constexpr,
65
+ NORM_BEFORE_GATE: tl.constexpr,
66
+ IS_RMS_NORM: tl.constexpr,
67
+ ):
68
+ # Map the program id to the row of X and Y it should compute.
69
+ row = tl.program_id(0)
70
+ group = tl.program_id(1)
71
+ X += row * stride_x_row + group * N
72
+ Y += row * stride_y_row + group * N
73
+ if HAS_Z:
74
+ Z += row * stride_z_row + group * N
75
+ if not IS_RMS_NORM:
76
+ Mean += group * M
77
+ Rstd += group * M
78
+ W += group * N
79
+ if HAS_BIAS:
80
+ B += group * N
81
+ # Compute mean and variance
82
+ cols = tl.arange(0, BLOCK_N)
83
+ x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
84
+ if HAS_Z and not NORM_BEFORE_GATE:
85
+ z = tl.load(Z + cols, mask=cols < N).to(tl.float32)
86
+ x *= z * tl.sigmoid(z)
87
+ if not IS_RMS_NORM:
88
+ mean = tl.sum(x, axis=0) / N
89
+ tl.store(Mean + row, mean)
90
+ xbar = tl.where(cols < N, x - mean, 0.)
91
+ var = tl.sum(xbar * xbar, axis=0) / N
92
+ else:
93
+ xbar = tl.where(cols < N, x, 0.)
94
+ var = tl.sum(xbar * xbar, axis=0) / N
95
+ rstd = 1 / tl.sqrt(var + eps)
96
+ tl.store(Rstd + row, rstd)
97
+ # Normalize and apply linear transformation
98
+ mask = cols < N
99
+ w = tl.load(W + cols, mask=mask).to(tl.float32)
100
+ if HAS_BIAS:
101
+ b = tl.load(B + cols, mask=mask).to(tl.float32)
102
+ x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
103
+ y = x_hat * w + b if HAS_BIAS else x_hat * w
104
+ if HAS_Z and NORM_BEFORE_GATE:
105
+ z = tl.load(Z + cols, mask=mask).to(tl.float32)
106
+ y *= z * tl.sigmoid(z)
107
+ # Write output
108
+ tl.store(Y + cols, y, mask=mask)
109
+
110
+
111
+ def layer_norm_fwd(
112
+ x: torch.Tensor,
113
+ weight: torch.Tensor,
114
+ bias: torch.Tensor,
115
+ eps: float,
116
+ z: torch.Tensor = None,
117
+ out: torch.Tensor = None,
118
+ group_size: int = None,
119
+ norm_before_gate: bool = True,
120
+ is_rms_norm: bool = False,
121
+ ):
122
+ M, N = x.shape
123
+ if group_size is None:
124
+ group_size = N
125
+ assert N % group_size == 0
126
+ ngroups = N // group_size
127
+ assert x.stride(-1) == 1
128
+ if z is not None:
129
+ assert z.stride(-1) == 1
130
+ assert z.shape == (M, N)
131
+ assert weight.shape == (N,)
132
+ assert weight.stride(-1) == 1
133
+ if bias is not None:
134
+ assert bias.stride(-1) == 1
135
+ assert bias.shape == (N,)
136
+ # allocate output
137
+ if out is not None:
138
+ assert out.shape == x.shape
139
+ else:
140
+ out = torch.empty_like(x)
141
+ assert out.stride(-1) == 1
142
+ mean = torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device) if not is_rms_norm else None
143
+ rstd = torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device)
144
+ # Less than 64KB per feature: enqueue fused kernel
145
+ MAX_FUSED_SIZE = 65536 // x.element_size()
146
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(group_size))
147
+ if group_size > BLOCK_N:
148
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
149
+ # heuristics for number of warps
150
+ num_warps = min(max(BLOCK_N // 256, 1), 8)
151
+ grid = (M, ngroups)
152
+ layer_norm_fwd_kernel[grid](
153
+ x,
154
+ out,
155
+ weight,
156
+ bias,
157
+ z,
158
+ mean,
159
+ rstd,
160
+ x.stride(0),
161
+ out.stride(0),
162
+ z.stride(0) if z is not None else 0,
163
+ M,
164
+ group_size,
165
+ eps,
166
+ BLOCK_N=BLOCK_N,
167
+ NORM_BEFORE_GATE=norm_before_gate,
168
+ IS_RMS_NORM=is_rms_norm,
169
+ num_warps=num_warps
170
+ )
171
+ return out, mean, rstd
172
+
173
+
174
+ @triton.heuristics({
175
+ "HAS_BIAS": lambda args: args["B"] is not None,
176
+ "HAS_Z": lambda args: args["Z"] is not None,
177
+ "RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None,
178
+ })
179
+ @triton.jit
180
+ def layer_norm_bwd_kernel(
181
+ X, # pointer to the input
182
+ W, # pointer to the weights
183
+ B, # pointer to the biases
184
+ Z, # pointer to the other branch
185
+ Y, # pointer to the output to be recomputed
186
+ DY, # pointer to the output gradient
187
+ DX, # pointer to the input gradient
188
+ DW, # pointer to the partial sum of weights gradient
189
+ DB, # pointer to the partial sum of biases gradient
190
+ DZ, # pointer to the other branch
191
+ Mean, # pointer to the mean
192
+ Rstd, # pointer to the 1/std
193
+ stride_x_row, # how much to increase the pointer when moving by 1 row
194
+ stride_z_row,
195
+ stride_y_row,
196
+ stride_dy_row,
197
+ stride_dx_row,
198
+ stride_dz_row,
199
+ stride_dw_row,
200
+ stride_db_row,
201
+ M, # number of rows in X
202
+ N, # number of columns in X
203
+ eps, # epsilon to avoid division by zero
204
+ rows_per_program,
205
+ NORM_BEFORE_GATE: tl.constexpr,
206
+ IS_RMS_NORM: tl.constexpr,
207
+ HAS_BIAS: tl.constexpr,
208
+ HAS_Z: tl.constexpr,
209
+ RECOMPUTE_OUTPUT: tl.constexpr,
210
+ BLOCK_N: tl.constexpr,
211
+ ):
212
+ # Map the program id to the elements of X, DX, and DY it should compute.
213
+ row_block_id = tl.program_id(0)
214
+ group = tl.program_id(1)
215
+ row_start = row_block_id * rows_per_program
216
+ cols = tl.arange(0, BLOCK_N)
217
+ mask = cols < N
218
+ X += row_start * stride_x_row + group * N
219
+ if HAS_Z:
220
+ Z += row_start * stride_z_row + group * N
221
+ DZ += row_start * stride_dz_row + group * N
222
+ DY += row_start * stride_dy_row + group * N
223
+ DX += row_start * stride_dx_row + group * N
224
+ if RECOMPUTE_OUTPUT:
225
+ Y += row_start * stride_y_row + group * N
226
+ if not IS_RMS_NORM:
227
+ Mean += group * M
228
+ Rstd += group * M
229
+ W += group * N
230
+ w = tl.load(W + cols, mask=mask).to(tl.float32)
231
+ if (RECOMPUTE_OUTPUT or HAS_Z) and HAS_BIAS:
232
+ B += group * N
233
+ b = tl.load(B + cols, mask=mask, other=0.).to(tl.float32)
234
+ dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
235
+ if HAS_BIAS:
236
+ db = tl.zeros((BLOCK_N,), dtype=tl.float32)
237
+ row_end = min((row_block_id + 1) * rows_per_program, M)
238
+ for row in range(row_start, row_end):
239
+ # Load data to SRAM
240
+ x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
241
+ dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
242
+ if not IS_RMS_NORM:
243
+ mean = tl.load(Mean + row)
244
+ if HAS_Z and not NORM_BEFORE_GATE:
245
+ z = tl.load(Z + cols, mask=mask, other=0.).to(tl.float32)
246
+ x_og = x
247
+ x = x_og * z * tl.sigmoid(z)
248
+ rstd = tl.load(Rstd + row)
249
+ # Compute dx
250
+ xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
251
+ xhat = tl.where(mask, xhat, 0.)
252
+ if HAS_Z and NORM_BEFORE_GATE:
253
+ z = tl.load(Z + cols, mask=mask, other=0.).to(tl.float32)
254
+ z_sigmoid = tl.sigmoid(z)
255
+ y = xhat * w + b if HAS_BIAS else xhat * w
256
+ if RECOMPUTE_OUTPUT:
257
+ tl.store(Y + cols, y * z * z_sigmoid, mask=mask)
258
+ dz = dy * y * z_sigmoid * (1 + z * (1 - z_sigmoid))
259
+ tl.store(DZ + cols, dz, mask=mask)
260
+ dy *= z * z_sigmoid
261
+ else:
262
+ if RECOMPUTE_OUTPUT:
263
+ y = xhat * w + b if HAS_BIAS else xhat * w
264
+ tl.store(Y + cols, y, mask=mask)
265
+ wdy = w * dy
266
+ c1 = tl.sum(xhat * wdy, axis=0) / N
267
+ if not IS_RMS_NORM:
268
+ c2 = tl.sum(wdy, axis=0) / N
269
+ dx = (wdy - (xhat * c1 + c2)) * rstd
270
+ else:
271
+ dx = (wdy - xhat * c1) * rstd
272
+ dw += dy * xhat
273
+ if HAS_BIAS:
274
+ db += dy
275
+ if HAS_Z and not NORM_BEFORE_GATE:
276
+ z_sigmoid = tl.sigmoid(z)
277
+ dz = dx * x_og * z_sigmoid * (1 + z * (1 - z_sigmoid))
278
+ tl.store(DZ + cols, dz, mask=mask)
279
+ dx *= z * z_sigmoid
280
+ # Write dx
281
+ tl.store(DX + cols, dx, mask=mask)
282
+
283
+ X += stride_x_row
284
+ if HAS_Z:
285
+ Z += stride_z_row
286
+ DZ += stride_dz_row
287
+ if RECOMPUTE_OUTPUT:
288
+ Y += stride_y_row
289
+ DY += stride_dy_row
290
+ DX += stride_dx_row
291
+ tl.store(DW + row_block_id * stride_dw_row + group * N + cols, dw, mask=mask)
292
+ if HAS_BIAS:
293
+ tl.store(DB + row_block_id * stride_db_row + group * N + cols, db, mask=mask)
294
+
295
+
296
+ def layer_norm_bwd(
297
+ dy: torch.Tensor,
298
+ x: torch.Tensor,
299
+ weight: torch.Tensor,
300
+ bias: torch.Tensor,
301
+ eps: float,
302
+ mean: torch.Tensor,
303
+ rstd: torch.Tensor,
304
+ z: torch.Tensor = None,
305
+ group_size: int = None,
306
+ norm_before_gate: bool = True,
307
+ is_rms_norm: bool = False,
308
+ recompute_output: bool = False,
309
+ dz: torch.Tensor = None,
310
+ out: torch.Tensor = None,
311
+ ):
312
+ M, N = x.shape
313
+ if group_size is None:
314
+ group_size = N
315
+ assert N % group_size == 0
316
+ ngroups = N // group_size
317
+ assert x.stride(-1) == 1
318
+ assert dy.stride(-1) == 1
319
+ assert dy.shape == (M, N)
320
+ if z is not None:
321
+ assert z.stride(-1) == 1
322
+ assert z.shape == (M, N)
323
+ assert weight.shape == (N,)
324
+ assert weight.stride(-1) == 1
325
+ if bias is not None:
326
+ assert bias.stride(-1) == 1
327
+ assert bias.shape == (N,)
328
+ # allocate output
329
+ dx = torch.empty_like(x)
330
+ if dz is not None:
331
+ assert z is not None
332
+ assert dz.shape == z.shape
333
+ assert dz.stride(-1) == 1
334
+ else:
335
+ dz = torch.empty_like(z) if z is not None else None
336
+ if recompute_output:
337
+ if out is None:
338
+ out = torch.empty_like(x)
339
+ assert out.shape == x.shape
340
+
341
+ # Less than 64KB per feature: enqueue fused kernel
342
+ MAX_FUSED_SIZE = 65536 // x.element_size()
343
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(group_size))
344
+ if group_size > BLOCK_N:
345
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
346
+ # heuristics for number of warps
347
+ num_warps = min(max(BLOCK_N // 256, 1), 8)
348
+ sm_count = get_multiprocessor_count(x.device.index)
349
+ # If group size is small (e.g., 64), we're only using 1 warp. So having just 108 programs
350
+ # would limit the occupancy.
351
+ nrow_groups = math.ceil(sm_count * math.ceil(4 / num_warps) / ngroups)
352
+ _dw = torch.empty((nrow_groups, N), dtype=torch.float32, device=weight.device)
353
+ _db = torch.empty((nrow_groups, N), dtype=torch.float32, device=bias.device) if bias is not None else None
354
+ rows_per_program = math.ceil(M / nrow_groups)
355
+ grid = (nrow_groups, ngroups)
356
+ layer_norm_bwd_kernel[grid](
357
+ x,
358
+ weight,
359
+ bias,
360
+ z,
361
+ out if recompute_output else None,
362
+ dy,
363
+ dx,
364
+ _dw,
365
+ _db,
366
+ dz,
367
+ mean,
368
+ rstd,
369
+ x.stride(0),
370
+ z.stride(0) if z is not None else 0,
371
+ 0 if not recompute_output else out.stride(0),
372
+ dy.stride(0),
373
+ dx.stride(0),
374
+ dz.stride(0) if dz is not None else 0,
375
+ _dw.stride(0),
376
+ _db.stride(0) if _db is not None else 0,
377
+ M, group_size, eps,
378
+ rows_per_program,
379
+ BLOCK_N=BLOCK_N,
380
+ NORM_BEFORE_GATE=norm_before_gate,
381
+ IS_RMS_NORM=is_rms_norm,
382
+ num_warps=num_warps
383
+ )
384
+ dw = _dw.sum(0).to(weight.dtype)
385
+ db = _db.sum(0).to(bias.dtype) if bias is not None else None
386
+ return (dx, dw, db, dz) if not recompute_output else (dx, dw, db, dz, out)
387
+
388
+
389
+ class LayerNormFn(torch.autograd.Function):
390
+
391
+ @input_guard
392
+ @staticmethod
393
+ def forward(ctx, x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True,
394
+ is_rms_norm=False):
395
+ """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))
396
+ """
397
+
398
+ x_shape_og = x.shape
399
+ # reshape input data into 2D tensor
400
+ x = x.reshape(-1, x.shape[-1])
401
+ if x.stride(-1) != 1:
402
+ x = x.contiguous()
403
+ if z is not None:
404
+ assert z.shape == x_shape_og
405
+ z = z.reshape(-1, z.shape[-1])
406
+ if z.stride(-1) != 1:
407
+ z = z.contiguous()
408
+ weight = weight.contiguous()
409
+ if bias is not None:
410
+ bias = bias.contiguous()
411
+ y, mean, rstd = layer_norm_fwd(
412
+ x,
413
+ weight,
414
+ bias,
415
+ eps,
416
+ z=z,
417
+ group_size=group_size,
418
+ norm_before_gate=norm_before_gate,
419
+ is_rms_norm=is_rms_norm,
420
+ )
421
+ ctx.save_for_backward(x, weight, bias, mean, rstd, z)
422
+ ctx.x_shape_og = x_shape_og
423
+ ctx.eps = eps
424
+ ctx.group_size = group_size
425
+ ctx.norm_before_gate = norm_before_gate
426
+ ctx.is_rms_norm = is_rms_norm
427
+ return y.reshape(x_shape_og)
428
+
429
+ @input_guard
430
+ @staticmethod
431
+ def backward(ctx, dy):
432
+ x, weight, bias, mean, rstd, z = ctx.saved_tensors
433
+ dy = dy.reshape(-1, dy.shape[-1])
434
+ if dy.stride(-1) != 1:
435
+ dy = dy.contiguous()
436
+ assert dy.shape == x.shape
437
+ dx, dw, db, dz = layer_norm_bwd(
438
+ dy,
439
+ x,
440
+ weight,
441
+ bias,
442
+ ctx.eps,
443
+ mean,
444
+ rstd,
445
+ z,
446
+ ctx.group_size,
447
+ ctx.norm_before_gate,
448
+ ctx.is_rms_norm
449
+ )
450
+ dx = dx.reshape(ctx.x_shape_og)
451
+ dz = dz.reshape(ctx.x_shape_og) if dz is not None else None
452
+ return dx, dw, db, dz, None, None, None, None
453
+
454
+
455
+ def layernorm_fn(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, is_rms_norm=False):
456
+ return LayerNormFn.apply(x, weight, bias, z, eps, group_size, norm_before_gate, is_rms_norm)
457
+
458
+
459
+ def rmsnorm_fn(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True):
460
+ return LayerNormFn.apply(x, weight, bias, z, eps, group_size, norm_before_gate, True)
461
+
462
+
463
+ class LayerNormGated(nn.Module):
464
+
465
+ def __init__(
466
+ self,
467
+ hidden_size,
468
+ eps: float = 1e-5,
469
+ group_size: Optional[int] = None,
470
+ norm_before_gate: bool = True,
471
+ device: Optional[torch.device] = None,
472
+ dtype: Optional[torch.dtype] = None,
473
+ ):
474
+ """If group_size is not None, we do GroupNorm with each group having group_size elements.
475
+ group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group).
476
+ """
477
+
478
+ factory_kwargs = {"device": device, "dtype": dtype}
479
+ super().__init__()
480
+ self.eps = eps
481
+ self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
482
+ self.bias = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
483
+ self.group_size = group_size
484
+ self.norm_before_gate = norm_before_gate
485
+ self.reset_parameters()
486
+
487
+ def reset_parameters(self):
488
+ torch.nn.init.ones_(self.weight)
489
+ torch.nn.init.zeros_(self.bias)
490
+
491
+ def forward(self, x, z=None):
492
+ """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))
493
+ """
494
+ return layernorm_fn(x, self.weight, self.bias, z=z, group_size=self.group_size, eps=self.eps,
495
+ norm_before_gate=self.norm_before_gate)
496
+
497
+
498
+ class RMSNormGated(nn.Module):
499
+
500
+ def __init__(
501
+ self,
502
+ hidden_size,
503
+ eps: float = 1e-5,
504
+ group_size: Optional[int] = None,
505
+ norm_before_gate: bool = False,
506
+ device: Optional[torch.device] = None,
507
+ dtype: Optional[torch.dtype] = None,
508
+ ):
509
+ """If group_size is not None, we do GroupNorm with each group having group_size elements.
510
+ group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group).
511
+ """
512
+ factory_kwargs = {"device": device, "dtype": dtype}
513
+ super().__init__()
514
+ self.eps = eps
515
+ self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
516
+ self.register_parameter("bias", None)
517
+ self.group_size = group_size
518
+ self.norm_before_gate = norm_before_gate
519
+ self.reset_parameters()
520
+
521
+ def reset_parameters(self):
522
+ torch.nn.init.ones_(self.weight)
523
+
524
+ def forward(self, x, z=None):
525
+ """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))
526
+ """
527
+ return rmsnorm_fn(x, self.weight, self.bias, z=z, eps=self.eps, group_size=self.group_size,
528
+ norm_before_gate=self.norm_before_gate)
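A minimal usage sketch of the gated RMSNorm above, e.g. as a Mamba-style output gate; the shapes, device and dtype are illustrative assumptions.

norm = RMSNormGated(64, eps=1e-5, norm_before_gate=False, device='cuda', dtype=torch.bfloat16)
x = torch.randn(2, 128, 64, device='cuda', dtype=torch.bfloat16)
z = torch.randn_like(x)   # gating branch
y = norm(x, z)            # norm(x * silu(z)) since norm_before_gate=False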
fla/modules/mlp.py ADDED
@@ -0,0 +1,127 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
3
+
4
+ from __future__ import annotations
5
+
6
+ from functools import partial
7
+ from typing import TYPE_CHECKING, Any, Optional
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ from torch.distributed import DeviceMesh
12
+ from torch.distributed.tensor import DTensor, Placement, Replicate, Shard, distribute_module
13
+ from torch.distributed.tensor.parallel import ParallelStyle
14
+
15
+ from fla.modules.activations import swiglu, swiglu_linear
16
+
17
+ if TYPE_CHECKING:
18
+ from transformers.processing_utils import Unpack
19
+
20
+
21
+ class GatedMLP(nn.Module):
22
+
23
+ def __init__(
24
+ self,
25
+ hidden_size: int,
26
+ hidden_ratio: Optional[int] = None,
27
+ intermediate_size: Optional[int] = None,
28
+ hidden_act: str = 'swish',
29
+ fuse_swiglu: bool = True
30
+ ) -> GatedMLP:
31
+ super().__init__()
32
+
33
+ self.hidden_size = hidden_size
34
+ # the final number of params is `hidden_ratio * hidden_size^2`
35
+ # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
36
+ if hidden_ratio is None:
37
+ hidden_ratio = 4
38
+ if intermediate_size is None:
39
+ intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
40
+ intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
41
+ self.hidden_ratio = hidden_ratio
42
+ self.intermediate_size = intermediate_size
43
+ self.hidden_act = hidden_act
44
+ self.fuse_swiglu = fuse_swiglu
45
+
46
+ if hidden_act != 'swish':
47
+ raise ValueError(f'Unsupported hidden_act: {hidden_act}')
48
+
49
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
50
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
51
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
52
+ if self.fuse_swiglu:
53
+ self.swiglu_linear = SwiGLULinear()
54
+
55
+ def forward(
56
+ self,
57
+ x: torch.Tensor,
58
+ **kwargs: Unpack[Any]
59
+ ) -> torch.Tensor:
60
+ gate, y = self.gate_proj(x), self.up_proj(x)
61
+ if self.fuse_swiglu:
62
+ return self.swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)
63
+ else:
64
+ return self.down_proj(swiglu(gate, y))
65
+
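As a worked instance of the `intermediate_size` rounding rule above (the hidden size is an illustrative assumption):

hidden_size, hidden_ratio = 2048, 4
intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)       # 5461
intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)  # 5632, the next multiple of 256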
66
+
67
+ class SwiGLULinear(nn.Module):
68
+
69
+ def forward(self, x, y, weight, bias):
70
+ return swiglu_linear(x, y, weight, bias)
71
+
72
+
73
+ class SwiGLULinearParallel(ParallelStyle):
74
+ def __init__(
75
+ self,
76
+ *,
77
+ input_layouts: Optional[Placement] = None,
78
+ output_layouts: Optional[Placement] = None,
79
+ use_local_output: bool = True,
80
+ ):
81
+ super().__init__()
82
+ self.input_layouts = (input_layouts or Shard(-1),)
83
+ self.output_layouts = (output_layouts or Replicate(),)
84
+ self.desired_input_layouts = (Shard(-1),)
85
+ self.use_local_output = use_local_output
86
+
87
+ @staticmethod
88
+ def _prepare_input_fn(
89
+ input_layouts, desired_input_layouts, mod, inputs, device_mesh
90
+ ):
91
+ x, y, weight, bias = inputs
92
+ if not isinstance(x, DTensor):
93
+ x = DTensor.from_local(x, device_mesh, input_layouts, run_check=False)
94
+ if x.placements != desired_input_layouts:
95
+ x = x.redistribute(placements=desired_input_layouts, async_op=True)
96
+
97
+ if not isinstance(y, DTensor):
98
+ y = DTensor.from_local(y, device_mesh, input_layouts, run_check=False)
99
+ if y.placements != desired_input_layouts:
100
+ y = y.redistribute(placements=desired_input_layouts, async_op=True)
101
+
102
+ if not isinstance(weight, DTensor):
103
+ weight = DTensor.from_local(weight, device_mesh, (Shard(1),))
104
+
105
+ if bias is not None and not isinstance(bias, DTensor):
106
+ bias = DTensor.from_local(bias, device_mesh, (Replicate(),))
107
+
108
+ return x, y, weight, bias
109
+
110
+ @staticmethod
111
+ def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
112
+ # Rowwise sharding produces partial output, depending on output layouts:
113
+ # 1. to replicate -> allreduce
114
+ # 2. to shard -> reduce_scatter
115
+ if outputs.placements != output_layouts:
116
+ outputs = outputs.redistribute(placements=output_layouts, async_op=True)
117
+ # back to local tensor if use_local_output is True
118
+ return outputs.to_local() if use_local_output else outputs
119
+
120
+ def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
121
+ return distribute_module(
122
+ module,
123
+ device_mesh,
124
+ partition_fn=None,
125
+ input_fn=partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts),
126
+ output_fn=partial(self._prepare_output_fn, self.output_layouts, self.use_local_output)
127
+ )
fla/modules/parallel.py ADDED
@@ -0,0 +1,37 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
3
+
4
+ from typing import Optional
5
+
6
+ import torch.nn as nn
7
+ from torch.distributed import DeviceMesh
8
+ from torch.distributed.tensor import DTensor, distribute_module
9
+ from torch.distributed.tensor.parallel import ParallelStyle
10
+ from torch.distributed.tensor.placement_types import Placement
11
+
12
+
13
+ class PrepareModuleWeight(ParallelStyle):
14
+ def __init__(self, *, layouts: Optional[Placement] = None):
15
+ super().__init__()
16
+ self.layouts = layouts
17
+
18
+ def _replicate_module_fn(
19
+ self,
20
+ name: str,
21
+ module: nn.Module,
22
+ device_mesh: DeviceMesh
23
+ ):
24
+ for p_name, param in module.named_parameters():
25
+ replicated_param = nn.Parameter(
26
+ DTensor.from_local(param, device_mesh, [self.layouts], run_check=False)
27
+ )
28
+ module.register_parameter(p_name, replicated_param)
29
+
30
+ def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
31
+ return distribute_module(
32
+ module,
33
+ device_mesh,
34
+ partition_fn=self._replicate_module_fn,
35
+ input_fn=None,
36
+ output_fn=None
37
+ )
fla/modules/rotary.py ADDED
@@ -0,0 +1,512 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright (c) 2023, Tri Dao.
4
+ # https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/ops/triton/rotary.py
5
+
6
+ from typing import Optional, Tuple, Union
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import triton
11
+ import triton.language as tl
12
+ from einops import rearrange, repeat
13
+
14
+ from fla.utils import get_multiprocessor_count, input_guard
15
+
16
+
17
+ def rotate_half(x, interleaved=False):
18
+ if not interleaved:
19
+ x1, x2 = x.chunk(2, dim=-1)
20
+ return torch.cat((-x2, x1), dim=-1)
21
+ else:
22
+ x1, x2 = x[..., ::2], x[..., 1::2]
23
+ return rearrange(torch.stack((-x2, x1), dim=-1), '... d two -> ... (d two)', two=2)
24
+
25
+
26
+ def rotary_embedding_ref(x, cos, sin, interleaved=False):
27
+ ro_dim = cos.shape[-1] * 2
28
+ assert ro_dim <= x.shape[-1]
29
+ cos = repeat(cos, '... d -> ... 1 (2 d)' if not interleaved else '... d -> ... 1 (d 2)')
30
+ sin = repeat(sin, '... d -> ... 1 (2 d)' if not interleaved else '... d -> ... 1 (d 2)')
31
+ return torch.cat([x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, x[..., ro_dim:]], -1)
32
+
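Concretely, for every rotary pair (x1, x2) — the two halves of the first `ro_dim` channels when `interleaved=False`, adjacent channels when `interleaved=True` — the reference and the kernel below apply y1 = x1 * cos - x2 * sin and y2 = x1 * sin + x2 * cos, leave channels beyond ro_dim = 2 * cos.shape[-1] untouched, and the backward pass reuses the same kernel with the sign of sin flipped (`conjugate=True`).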
33
+
34
+ @triton.autotune(
35
+ configs=[
36
+ triton.Config({}, num_warps=num_warps, num_stages=num_stages)
37
+ for num_warps in [2, 4, 8, 16, 32]
38
+ for num_stages in [2, 3, 4]
39
+ ],
40
+ key=['B', 'H', 'D', 'INTERLEAVED'],
41
+ )
42
+ @triton.jit
43
+ def rotary_embedding_kernel(
44
+ x,
45
+ cos,
46
+ sin,
47
+ y,
48
+ cu_seqlens,
49
+ seq_offsets, # this could be int or a pointer
50
+ # Matrix dimensions
51
+ B: tl.constexpr,
52
+ T: tl.constexpr,
53
+ H: tl.constexpr,
54
+ D: tl.constexpr,
55
+ R: tl.constexpr,
56
+ TR: tl.constexpr,
57
+ BT: tl.constexpr,
58
+ BD: tl.constexpr,
59
+ IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr,
60
+ IS_VARLEN: tl.constexpr,
61
+ INTERLEAVED: tl.constexpr,
62
+ CONJUGATE: tl.constexpr
63
+ ):
64
+ i_t, i_b, i_h = tl.program_id(0), tl.program_id(1), tl.program_id(2)
65
+
66
+ if not IS_VARLEN:
67
+ x = x + i_b * T*H*D + i_h * D
68
+ y = y + i_b * T*H*D + i_h * D
69
+ else:
70
+ bos, eos = tl.load(cu_seqlens + i_b), tl.load(cu_seqlens + i_b + 1)
71
+ T = eos - bos
72
+ x = x + bos * H*D + i_h * D
73
+ y = y + bos * H*D + i_h * D
74
+
75
+ if i_t * BT >= T:
76
+ return
77
+
78
+ o_t = i_t * BT + tl.arange(0, BT)
79
+ if not IS_SEQLEN_OFFSETS_TENSOR:
80
+ o_cs = o_t + seq_offsets
81
+ else:
82
+ o_cs = o_t + tl.load(seq_offsets + i_b)
83
+
84
+ if not INTERLEAVED:
85
+ # Load the 1st and 2nd halves of x, do calculation, then store to 1st and 2nd halves of out
86
+ o_r = tl.arange(0, BD // 2)
87
+ p_x = x + o_t[:, None] * H*D + o_r[None, :]
88
+ p_cos = cos + (o_cs[:, None] * R + o_r[None, :])
89
+ p_sin = sin + (o_cs[:, None] * R + o_r[None, :])
90
+ mask = (o_t[:, None] >= 0) & (o_t[:, None] < T) & (o_r[None, :] < R)
91
+
92
+ b_cos = tl.load(p_cos, mask=mask, other=1.0).to(tl.float32)
93
+ b_sin = tl.load(p_sin, mask=mask, other=0.0).to(tl.float32)
94
+ b_x0 = tl.load(p_x, mask=mask, other=0.0).to(tl.float32)
95
+ b_x1 = tl.load(p_x + R, mask=mask, other=0.0).to(tl.float32)
96
+ if CONJUGATE:
97
+ b_sin = -b_sin
98
+ b_o0 = b_x0 * b_cos - b_x1 * b_sin
99
+ b_o1 = b_x0 * b_sin + b_x1 * b_cos
100
+ # write back result
101
+ p_y = y + (o_t[:, None] * H*D + o_r[None, :])
102
+ tl.store(p_y, b_o0, mask=mask)
103
+ tl.store(p_y + R, b_o1, mask=mask)
104
+ else:
105
+ # We don't want to load x[0, 2, 4, ...] and x[1, 3, 5, ...] separately since both are slow.
106
+ # Instead, we load x0 = x[0, 1, 2, 3, ...] and x1 = x[1, 0, 3, 2, ...].
107
+ # Loading x0 will be fast but x1 will be slow.
108
+ # Then we load cos = cos[0, 0, 1, 1, ...] and sin = sin[0, 0, 1, 1, ...].
109
+ # Then we do the calculation and use tl.where to pick put the right outputs for the even
110
+ # and for the odd indices.
111
+ o_d = tl.arange(0, BD)
112
+ o_d_swap = o_d + ((o_d + 1) % 2) * 2 - 1 # 1, 0, 3, 2, 5, 4, ...
113
+ o_d_repeat = tl.arange(0, BD) // 2
114
+ p_x0 = x + o_t[:, None] * H*D + o_d[None, :]
115
+ p_x1 = x + o_t[:, None] * H*D + o_d_swap[None, :]
116
+ p_cos = cos + (o_cs[:, None] * R + o_d_repeat[None, :])
117
+ p_sin = sin + (o_cs[:, None] * R + o_d_repeat[None, :])
118
+ mask = (o_cs[:, None] >= 0) & (o_cs[:, None] < TR) & (o_d_repeat[None, :] < R)
119
+
120
+ b_cos = tl.load(p_cos, mask=mask, other=1.0).to(tl.float32)
121
+ b_sin = tl.load(p_sin, mask=mask, other=0.0).to(tl.float32)
122
+ b_x0 = tl.load(p_x0, mask=mask, other=0.0).to(tl.float32)
123
+ b_x1 = tl.load(p_x1, mask=mask, other=0.0).to(tl.float32)
124
+ if CONJUGATE:
125
+ b_sin = -b_sin
126
+ b_o0 = b_x0 * b_cos
127
+ b_o1 = b_x1 * b_sin
128
+ b_y = tl.where(o_d[None, :] % 2 == 0, b_o0 - b_o1, b_o0 + b_o1)
129
+ p_y = y + (o_t[:, None] * H*D + o_d[None, :])
130
+ tl.store(p_y, b_y, mask=mask)
131
+
132
+
133
+ def rotary_embedding_fwdbwd(
134
+ x: torch.Tensor,
135
+ cos: torch.Tensor,
136
+ sin: torch.Tensor,
137
+ seqlen_offsets: Union[int, torch.Tensor] = 0,
138
+ cu_seqlens: Optional[torch.Tensor] = None,
139
+ max_seqlen: Optional[int] = None,
140
+ interleaved: bool = False,
141
+ inplace: bool = False,
142
+ conjugate: bool = False
143
+ ) -> torch.Tensor:
144
+ """
145
+ Args:
146
+ x: [B, T, H, D].
147
+ cos: [TR, R] (the rotary dim is 2 * R)
148
+ sin: [TR, R]
149
+ seqlen_offsets: integer or integer tensor of size (N,)
150
+ cu_seqlens: (N + 1,) or None
151
+ max_seqlen: int
152
+
153
+ Returns:
154
+ y: [B, T, H, D]
155
+ """
156
+ is_varlen = cu_seqlens is not None
157
+
158
+ B, T, H, D = x.shape
159
+ if not is_varlen:
160
+ N = B
161
+ else:
162
+ assert max_seqlen is not None, "If cu_seqlens is passed in, then max_seqlen must be passed"
163
+ N, T = cu_seqlens.shape[0] - 1, max_seqlen
164
+ TR, R = cos.shape
165
+ assert sin.shape == cos.shape
166
+ R2 = R * 2
167
+
168
+ assert D <= 256, "Only support D <= 256"
169
+ assert TR >= T, "TR must be >= T"
170
+
171
+ assert cos.dtype == sin.dtype, f"cos and sin must have the same dtype, got {cos.dtype} and {sin.dtype}"
172
+ assert x.dtype == cos.dtype, f"Input and cos/sin must have the same dtype, got {x.dtype} and {cos.dtype}"
173
+
174
+ if isinstance(seqlen_offsets, torch.Tensor):
175
+ assert seqlen_offsets.shape == (N,)
176
+ assert seqlen_offsets.dtype in [torch.int32, torch.int64]
177
+ else:
178
+ assert seqlen_offsets + T <= TR
179
+
180
+ y = torch.empty_like(x) if not inplace else x
181
+ if R2 < D and not inplace:
182
+ y[..., R2:].copy_(x[..., R2:])
183
+
184
+ BD = triton.next_power_of_2(R2)
185
+ BT = min(128, triton.next_power_of_2(triton.cdiv(T, get_multiprocessor_count(x.device.index))))
186
+
187
+ def grid(meta): return (triton.cdiv(T, meta['BT']), N, H) # noqa
188
+ rotary_embedding_kernel[grid](
189
+ x,
190
+ cos,
191
+ sin,
192
+ y,
193
+ cu_seqlens,
194
+ seqlen_offsets,
195
+ B=B,
196
+ T=T,
197
+ H=H,
198
+ D=D,
199
+ R=R,
200
+ TR=TR,
201
+ BT=BT,
202
+ BD=BD,
203
+ IS_SEQLEN_OFFSETS_TENSOR=isinstance(seqlen_offsets, torch.Tensor),
204
+ IS_VARLEN=is_varlen,
205
+ INTERLEAVED=interleaved,
206
+ CONJUGATE=conjugate
207
+ )
208
+ return y
209
+
210
+
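As a sanity reference, the non-interleaved path computed above can be expressed in plain PyTorch as the following sketch (illustrative names, zero sequence offset assumed, `2 * R <= D`):

```python
import torch

def rotary_reference(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor,
                     conjugate: bool = False) -> torch.Tensor:
    """Eager-mode sketch of the non-interleaved (GPT-NeoX style) rotation."""
    # x: [B, T, H, D]; cos/sin: [T, R]. Only the first 2 * R dims are rotated.
    R = cos.shape[-1]
    x0, x1, rest = x[..., :R], x[..., R:2 * R], x[..., 2 * R:]
    cos_b = cos[None, :, None, :]                                  # broadcast over batch and heads
    sin_b = -sin[None, :, None, :] if conjugate else sin[None, :, None, :]
    o0 = x0 * cos_b - x1 * sin_b                                   # mirrors b_o0 in the kernel
    o1 = x0 * sin_b + x1 * cos_b                                   # mirrors b_o1 in the kernel
    return torch.cat([o0, o1, rest], dim=-1)
```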
211
+ class RotaryEmbeddingFunction(torch.autograd.Function):
212
+
213
+ @staticmethod
214
+ @input_guard
215
+ def forward(
216
+ ctx,
217
+ x,
218
+ cos,
219
+ sin,
220
+ interleaved=False,
221
+ inplace=False,
222
+ seqlen_offsets: Union[int, torch.Tensor] = 0,
223
+ cu_seqlens: Optional[torch.Tensor] = None,
224
+ max_seqlen: Optional[int] = None,
225
+ ):
226
+ y = rotary_embedding_fwdbwd(
227
+ x,
228
+ cos,
229
+ sin,
230
+ seqlen_offsets=seqlen_offsets,
231
+ cu_seqlens=cu_seqlens,
232
+ max_seqlen=max_seqlen,
233
+ interleaved=interleaved,
234
+ inplace=inplace,
235
+ )
236
+ if isinstance(seqlen_offsets, int):
237
+ # Can't save int with save_for_backward
238
+ ctx.save_for_backward(cos, sin, cu_seqlens)
239
+ ctx.seqlen_offsets = seqlen_offsets
240
+ else:
241
+ ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets)
242
+ ctx.seqlen_offsets = None
243
+ ctx.interleaved = interleaved
244
+ ctx.inplace = inplace
245
+ ctx.max_seqlen = max_seqlen
246
+ return y if not inplace else x
247
+
248
+ @staticmethod
249
+ @input_guard
250
+ def backward(ctx, do):
251
+ seqlen_offsets = ctx.seqlen_offsets
252
+ if seqlen_offsets is None:
253
+ cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors
254
+ else:
255
+ cos, sin, cu_seqlens = ctx.saved_tensors
256
+ # TD [2023-09-02]: For some reason Triton (2.0.0.post1) errors with
257
+ # "[CUDA]: invalid device context", and cloning makes it work. Idk why. Triton 2.1.0 works.
258
+ if not ctx.interleaved and not ctx.inplace:
259
+ do = do.clone()
260
+ dx = rotary_embedding_fwdbwd(
261
+ do,
262
+ cos,
263
+ sin,
264
+ seqlen_offsets=seqlen_offsets,
265
+ cu_seqlens=cu_seqlens,
266
+ max_seqlen=ctx.max_seqlen,
267
+ interleaved=ctx.interleaved,
268
+ inplace=ctx.inplace,
269
+ conjugate=True,
270
+ )
271
+ return dx, None, None, None, None, None, None, None
272
+
273
+
274
+ def rotary_embedding(
275
+ x,
276
+ cos,
277
+ sin,
278
+ interleaved=False,
279
+ inplace=False,
280
+ seqlen_offsets: Union[int, torch.Tensor] = 0,
281
+ cu_seqlens: Optional[torch.Tensor] = None,
282
+ max_seqlen: Optional[int] = None,
283
+ ):
284
+ """
285
+ Args:
286
+ x: [B, T, H, D]
287
+ cos, sin: [TR, R//2]
288
+ interleaved:
289
+ If True, rotate pairs of even and odd dimensions (GPT-J style) instead of 1st half and 2nd half (GPT-NeoX style).
290
+ inplace:
291
+ If True, apply rotary embedding in-place.
292
+ seqlen_offsets: [N,] or int.
293
+ Each sequence in x is shifted by this amount.
294
+ Most commonly used in inference when we have KV cache.
295
+ cu_seqlens: [N + 1,] or None
296
+ max_seqlen: int
297
+
298
+ Returns:
299
+ out: [B, T, H, D]
300
+ """
301
+ return RotaryEmbeddingFunction.apply(
302
+ x,
303
+ cos,
304
+ sin,
305
+ interleaved,
306
+ inplace,
307
+ seqlen_offsets,
308
+ cu_seqlens,
309
+ max_seqlen
310
+ )
311
+
312
+
313
+ class RotaryEmbedding(nn.Module):
314
+ """
315
+ The rotary position embeddings from RoFormer_ (Su et. al).
316
+ A crucial insight from the method is that the query and keys are
317
+ transformed by rotation matrices which depend on the relative positions.
318
+
319
+ Other implementations are available in the Rotary Transformer repo_ and in
320
+ GPT-NeoX_; the GPT-NeoX implementation served as an inspiration.
321
+
322
+ .. _RoFormer: https://arxiv.org/abs/2104.09864
323
+ .. _repo: https://github.com/ZhuiyiTechnology/roformer
324
+ .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
325
+
326
+ If scale_base is not None, this implements XPos (Sun et al., https://arxiv.org/abs/2212.10554).
327
+ A recommended value for scale_base is 512: https://github.com/HazyResearch/flash-attention/issues/96
328
+ Reference: https://github.com/sunyt32/torchscale/blob/main/torchscale/component/xpos_relative_position.py
329
+ """
330
+
331
+ def __init__(
332
+ self,
333
+ dim: int,
334
+ base: float = 10000.0,
335
+ scale_base: Optional[float] = None,
336
+ interleaved: bool = False,
337
+ pos_idx_in_fp32: bool = True,
338
+ device: Optional[torch.device] = None,
339
+ ):
340
+ """
341
+ interleaved:
342
+ If True, rotate pairs of even and odd dimensions (GPT-J style) instead of 1st half and 2nd half (GPT-NeoX style).
343
+ pos_idx_in_fp32:
344
+ If True, the position indices [0.0, ..., seqlen - 1] are in fp32, otherwise they might be in lower precision.
345
+ This option was added because previously (before 2023-07-02), when we construct
346
+ the position indices, we use the dtype of self.inv_freq.
347
+ In most cases this would be fp32, but if the model is trained in pure bf16 (not mixed precision), then
348
+ self.inv_freq would be bf16, and the position indices are also in bf16.
349
+ Because of the limited precision of bf16 (e.g. 1995.0 is rounded to 2000.0), the
350
+ embeddings for some positions will coincide.
351
+ To maintain compatibility with models previously trained in pure bf16, we add this option.
352
+ """
353
+ super().__init__()
354
+
355
+ self.dim = dim
356
+ self.base = float(base)
357
+ self.scale_base = scale_base
358
+ self.interleaved = interleaved
359
+ self.pos_idx_in_fp32 = pos_idx_in_fp32
360
+ self.device = device
361
+
362
+ # Generate and save the inverse frequency buffer (non trainable)
363
+ self.register_buffer("inv_freq", torch.empty(-(dim // -2), dtype=torch.float32, device=device), persistent=False)
364
+
365
+ scale = None
366
+ if scale_base is not None:
367
+ scale = torch.empty(-(dim // -2), dtype=torch.float32, device=device)
368
+ self.register_buffer("scale", scale, persistent=False)
369
+
370
+ self._seq_len_cached = 0
371
+ self._cos_cached = None
372
+ self._sin_cached = None
373
+ self._cos_k_cached = None
374
+ self._sin_k_cached = None
375
+
376
+ self.reset_parameters()
377
+
378
+ def reset_parameters(self):
379
+ with torch.no_grad():
380
+ self.inv_freq.copy_(self._compute_inv_freq(device=self.inv_freq.device))
381
+ if self.scale_base is not None:
382
+ self.scale.copy_(self._compute_scale(device=self.scale.device))
383
+
384
+ def __repr__(self):
385
+ s = f"{self.__class__.__name__}("
386
+ s += f"dim={self.dim}, "
387
+ s += f"base={self.base}, "
388
+ s += f"interleaved={self.interleaved}, "
389
+ if self.scale_base is not None:
390
+ s += f"scale_base={self.scale_base}, "
391
+ s += f"pos_idx_in_fp32={self.pos_idx_in_fp32})"
392
+ return s
393
+
394
+ def _compute_inv_freq(self, device=None):
395
+ return 1.0 / (
396
+ self.base
397
+ ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)
398
+ )
399
+
400
+ def _compute_scale(self, device=None):
401
+ return (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) + 0.4 * self.dim) / (1.4 * self.dim)
402
+
403
+ def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
404
+ # Reset the tables if the sequence length has changed,
405
+ # if we're on a new device (possibly due to tracing for instance),
406
+ # or if we're switching from inference mode to training
407
+ if (
408
+ seqlen > self._seq_len_cached
409
+ or self._cos_cached is None
410
+ or self._cos_cached.device != device
411
+ or self._cos_cached.dtype != dtype
412
+ or (self.training and self._cos_cached.is_inference())
413
+ ):
414
+ self._seq_len_cached = seqlen
415
+ # We want fp32 here, not self.inv_freq.dtype, since the model could be loaded in bf16
416
+ # And the output of arange can be quite large, so bf16 would lose a lot of precision.
417
+ # However, for compatibility reason, we add an option to use the dtype of self.inv_freq.
418
+ if self.pos_idx_in_fp32:
419
+ t = torch.arange(seqlen, device=device, dtype=torch.float32)
420
+ # We want fp32 here as well since inv_freq will be multiplied with t, and the output
421
+ # will be large. Having it in bf16 will lose a lot of precision and cause the
422
+ # cos & sin output to change significantly.
423
+ # We want to recompute self.inv_freq if it was not loaded in fp32
424
+ if self.inv_freq.dtype != torch.float32:
425
+ inv_freq = self._compute_inv_freq(device=device)
426
+ else:
427
+ inv_freq = self.inv_freq
428
+ else:
429
+ t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
430
+ inv_freq = self.inv_freq
431
+ # Don't do einsum, it converts fp32 to fp16 under AMP
432
+ # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
433
+ freqs = torch.outer(t, inv_freq)
434
+ if self.scale is None:
435
+ self._cos_cached = torch.cos(freqs).to(dtype)
436
+ self._sin_cached = torch.sin(freqs).to(dtype)
437
+ else:
438
+ power = (
439
+ torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device)
440
+ - seqlen // 2
441
+ ) / self.scale_base
442
+ scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")
443
+ # We want the multiplication by scale to happen in fp32
444
+ self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
445
+ self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
446
+ self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
447
+ self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)
448
+
449
+ def forward(
450
+ self,
451
+ q: torch.Tensor,
452
+ k: torch.Tensor,
453
+ seqlen_offset: Union[int, torch.Tensor] = 0,
454
+ cu_seqlens: Optional[torch.Tensor] = None,
455
+ max_seqlen: Optional[int] = None,
456
+ ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
457
+ """
458
+ q: [B, T, H, D]
459
+ k: [B, T, H, D]
460
+ seqlen_offset:
461
+ (N,) or int. Each sequence in x is shifted by this amount.
462
+ Most commonly used in inference when we have KV cache.
463
+ If it's a tensor of shape (N,), then to update the cos / sin cache, one
464
+ should pass in max_seqlen, which will update the cos / sin cache up to that length.
465
+ cu_seqlens: (N + 1,) or None
466
+ max_seqlen: int
467
+ """
468
+ if max_seqlen is not None:
469
+ self._update_cos_sin_cache(max_seqlen, device=q.device, dtype=q.dtype)
470
+ elif isinstance(seqlen_offset, int):
471
+ self._update_cos_sin_cache(q.shape[1] + seqlen_offset, device=q.device, dtype=q.dtype)
472
+ if self.scale is None:
473
+ q = rotary_embedding(
474
+ q,
475
+ self._cos_cached,
476
+ self._sin_cached,
477
+ interleaved=self.interleaved,
478
+ seqlen_offsets=seqlen_offset,
479
+ cu_seqlens=cu_seqlens,
480
+ max_seqlen=max_seqlen
481
+ )
482
+ k = rotary_embedding(
483
+ k,
484
+ self._cos_cached,
485
+ self._sin_cached,
486
+ interleaved=self.interleaved,
487
+ seqlen_offsets=seqlen_offset,
488
+ cu_seqlens=cu_seqlens,
489
+ max_seqlen=max_seqlen
490
+ )
491
+
492
+ else:
493
+ q = rotary_embedding(
494
+ q,
495
+ self._cos_cached,
496
+ self._sin_cached,
497
+ interleaved=self.interleaved,
498
+ seqlen_offsets=seqlen_offset,
499
+ cu_seqlens=cu_seqlens,
500
+ max_seqlen=max_seqlen
501
+ )
502
+ k = rotary_embedding(
503
+ k,
504
+ self._cos_k_cached,
505
+ self._sin_k_cached,
506
+ interleaved=self.interleaved,
507
+ seqlen_offsets=seqlen_offset,
508
+ cu_seqlens=cu_seqlens,
509
+ max_seqlen=max_seqlen
510
+ )
511
+
512
+ return q, k
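A hedged usage sketch for the module above; tensor shapes, dtypes, and offsets are illustrative, and a CUDA device is assumed since the underlying kernel is Triton-based:

```python
import torch

rotary = RotaryEmbedding(dim=64, base=10000.0, interleaved=False, device="cuda")
q = torch.randn(2, 128, 8, 64, device="cuda", dtype=torch.bfloat16)  # [B, T, H, D]
k = torch.randn(2, 128, 8, 64, device="cuda", dtype=torch.bfloat16)

# Prefill: rotate all positions starting at offset 0.
q_rot, k_rot = rotary(q, k, seqlen_offset=0)

# Decoding with a KV cache: pass the number of tokens already processed
# so the new token is rotated at the correct absolute position.
q_new = torch.randn(2, 1, 8, 64, device="cuda", dtype=torch.bfloat16)
k_new = torch.randn(2, 1, 8, 64, device="cuda", dtype=torch.bfloat16)
q_new, k_new = rotary(q_new, k_new, seqlen_offset=128)
```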
torchtitan/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ #
7
+ # Copyright (c) Meta Platforms, Inc. All Rights Reserved.
8
+
9
+ # Import to register Float8Converter.
10
+ import torchtitan.components.float8 # noqa: F401
11
+
12
+ # Import the built-in models here so that the corresponding register_model_spec()
13
+ # will be called.
14
+ import torchtitan.experiments # noqa: F401
15
+ import torchtitan.models # noqa: F401
torchtitan/config_manager.py ADDED
@@ -0,0 +1,947 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import argparse
8
+ import importlib
9
+ import inspect
10
+ import os
11
+ import sys
12
+ from collections import defaultdict
13
+ from typing import Tuple, Union
14
+
15
+ import torch
16
+
17
+ try:
18
+ import tomllib
19
+ except ModuleNotFoundError:
20
+ import tomli as tomllib
21
+
22
+ from torchtitan.tools.logging import logger
23
+
24
+ TORCH_DTYPE_MAP = {
25
+ "float16": torch.float16,
26
+ "float32": torch.float32,
27
+ "bfloat16": torch.bfloat16,
28
+ }
29
+
30
+
31
+ def string_list(raw_arg):
32
+ """Comma-separated string list argument."""
33
+ return [s.strip() for s in raw_arg.split(",") if s.strip()]
34
+
35
+
36
+ def check_string_list_argument(args_dict: dict[str, any], fullargname: str):
37
+ section, name = fullargname.split(".")
38
+ # Split string list which are still raw strings.
39
+ if (
40
+ section in args_dict
41
+ and name in args_dict[section]
42
+ and isinstance(args_dict[section][name], str)
43
+ ):
44
+ sec = args_dict[section]
45
+ sec[name] = string_list(sec[name])
46
+
47
+
48
+ class JobConfig:
49
+ """
50
+ A helper class to manage the train configuration.
51
+ Semantics:
52
+ - Default config is loaded from a toml file. If no toml file is provided,
53
+ then the default config is loaded from argparse defaults.
54
+ - if toml file has missing keys, they are filled with argparse defaults.
55
+ - if additional explicit cmd args are provided in addition to the toml
56
+ file, they will override the toml config and the argparse defaults
57
+
58
+ precedence order: cmdline > toml > argparse default
59
+
60
+ Arg parsing semantics:
61
+
62
+ Each argument starts with <prefix>_ which is the section name in the toml file
63
+ followed by name of the option in the toml file. For ex,
64
+ model.name translates to:
65
+ [model]
66
+ name
67
+ in the toml file
68
+ """
69
+
70
+ def __init__(self):
71
+ self.args_dict = None
72
+ # main parser
73
+ self.parser = argparse.ArgumentParser(description="torchtitan arg parser.")
74
+
75
+ self.parser.add_argument(
76
+ "--job.config_file",
77
+ type=str,
78
+ default=None,
79
+ help="Job config file",
80
+ )
81
+
82
+ # job level configs
83
+ self.parser.add_argument(
84
+ "--job.dump_folder",
85
+ type=str,
86
+ default="./torchtitan/outputs",
87
+ help="Folder to dump job outputs",
88
+ )
89
+ self.parser.add_argument(
90
+ "--job.description",
91
+ type=str,
92
+ default="default job",
93
+ help="Description of the job",
94
+ )
95
+ self.parser.add_argument(
96
+ "--job.use_for_integration_test",
97
+ action="store_true",
98
+ help="Add this config to the integration test suite",
99
+ )
100
+ self.parser.add_argument(
101
+ "--job.print_args",
102
+ action="store_true",
103
+ help="Print the args to terminal",
104
+ )
105
+
106
+ # profiling configs
107
+ self.parser.add_argument(
108
+ "--profiling.enable_profiling",
109
+ action="store_true",
110
+ help="Whether to enable pytorch profiler",
111
+ )
112
+ self.parser.add_argument(
113
+ "--profiling.save_traces_folder",
114
+ type=str,
115
+ default="profile_traces",
116
+ help="Trace files location",
117
+ )
118
+ self.parser.add_argument(
119
+ "--profiling.profile_freq",
120
+ type=int,
121
+ default=10,
122
+ help="How often to collect profiler traces, in iterations",
123
+ )
124
+ self.parser.add_argument(
125
+ "--profiling.enable_memory_snapshot",
126
+ action="store_true",
127
+ help="Whether to dump memory snapshot",
128
+ )
129
+ self.parser.add_argument(
130
+ "--profiling.save_memory_snapshot_folder",
131
+ type=str,
132
+ default="memory_snapshot",
133
+ help="Memory snapshot files location",
134
+ )
135
+
136
+ # metrics configs
137
+ self.parser.add_argument(
138
+ "--metrics.log_freq",
139
+ type=int,
140
+ default=10,
141
+ help="How often to log metrics to TensorBoard, in iterations",
142
+ )
143
+ self.parser.add_argument(
144
+ "--metrics.enable_tensorboard",
145
+ action="store_true",
146
+ help="Whether to log metrics to TensorBoard",
147
+ )
148
+ self.parser.add_argument(
149
+ "--metrics.disable_color_printing",
150
+ action="store_true",
151
+ help="Whether to disable color printing in logs",
152
+ )
153
+ self.parser.add_argument(
154
+ "--metrics.save_tb_folder",
155
+ type=str,
156
+ default="tb",
157
+ help="Folder to dump TensorBoard states",
158
+ )
159
+ self.parser.add_argument(
160
+ "--metrics.save_for_all_ranks",
161
+ action="store_true",
162
+ default=False,
163
+ help="""
164
+ Whether to save TensorBoard/Wandb metrics only for rank 0 or for all ranks.
165
+ When this option is False and pipeline_parallel_degree is > 1, the metrics
166
+ component uses the 0th rank of the last stage pipeline group, which is the
167
+ only stage that computes loss metrics.
168
+ """,
169
+ )
170
+ self.parser.add_argument(
171
+ "--metrics.enable_wandb",
172
+ action="store_true",
173
+ help="Whether to log metrics to Weights & Biases",
174
+ )
175
+
176
+ # model configs
177
+ self.parser.add_argument(
178
+ "--model.name",
179
+ type=str,
180
+ default="llama3",
181
+ help="Which model to train",
182
+ )
183
+ self.parser.add_argument(
184
+ "--model.flavor",
185
+ type=str,
186
+ default="debugmodel",
187
+ help="Which model config to train",
188
+ )
189
+ self.parser.add_argument(
190
+ "--model.norm_type",
191
+ type=str,
192
+ default="rmsnorm",
193
+ choices=["layernorm", "np_layernorm", "rmsnorm"],
194
+ help="Type of layer normalization to use [layernorm, np_layernorm, rmsnorm]",
195
+ )
196
+ self.parser.add_argument(
197
+ "--model.use_flex_attn",
198
+ action="store_true",
199
+ help="""
200
+ Whether to use Flex Attention.
201
+ Mixed usage of SDPA and FlexAttention is not supported yet.
202
+ """,
203
+ )
204
+ self.parser.add_argument(
205
+ "--model.attn_mask_type",
206
+ type=str,
207
+ default="causal",
208
+ choices=["causal", "block_causal"],
209
+ help="""
210
+ Specifies the type of bias/mask used for attention. If SDPA is used,
211
+ only the causal mask is supported by default. If FlexAttention is used,
212
+ both causal and block_causal masks are supported.
213
+ """,
214
+ )
215
+ self.parser.add_argument(
216
+ "--model.tokenizer_path",
217
+ type=str,
218
+ default="./assets/tokenizer/original/tokenizer.model",
219
+ help="Tokenizer path",
220
+ )
221
+ self.parser.add_argument(
222
+ "--model.converters",
223
+ type=string_list,
224
+ nargs="+",
225
+ default=[],
226
+ help="""
227
+ Comma separated list of converters to apply to the model.
228
+
229
+ For instance, the `float8` converter swaps `torch.nn.Linear`
230
+ with `Float8Linear`. This feature requires you to install 'torchao'
231
+ which can be found here: https://github.com/pytorch/ao
232
+ """,
233
+ )
234
+ self.parser.add_argument(
235
+ "--model.print_after_conversion",
236
+ action="store_true",
237
+ help="""
238
+ If true, model definition will be printed to stdout after all model
239
+ converters have been applied.
240
+ """,
241
+ )
242
+
243
+ # optimizer configs
244
+ self.parser.add_argument(
245
+ "--optimizer.name", type=str, default="AdamW", help="Optimizer to use"
246
+ )
247
+ self.parser.add_argument(
248
+ "--optimizer.lr", type=float, default=8e-4, help="Learning rate to use"
249
+ )
250
+ self.parser.add_argument(
251
+ "--optimizer.eps", type=float, default=1e-8, help="Epsilon value to use"
252
+ )
253
+ self.parser.add_argument(
254
+ "--optimizer.implementation",
255
+ type=str,
256
+ default="fused",
257
+ choices=["for-loop", "foreach", "fused"],
258
+ help="""
259
+ Specify which optimizer implementation to use:
260
+ - 'fused': Use fused implementation (CUDA only) for best performance.
261
+ - 'foreach': Use some horizontal fusion of tensors for better performance.
262
+ - 'for-loop': Use the default implementation for the optimizer (slowest).
263
+ - more info: https://pytorch.org/docs/stable/optim.html
264
+ """,
265
+ )
266
+ self.parser.add_argument(
267
+ "--optimizer.early_step_in_backward",
268
+ action="store_true",
269
+ help="""
270
+ Whether to apply optimizer in the backward. Caution, optimizer_in_backward
271
+ is not compatible with gradients clipping, users should not call
272
+ register_post_accumulate_grad_hook after the optimizer is built.""",
273
+ )
274
+
275
+ # lr scheduler configs
276
+ self.parser.add_argument(
277
+ "--lr_scheduler.warmup_steps",
278
+ type=int,
279
+ default=200,
280
+ help="Steps for lr scheduler warmup, normally 1/5 of --training.steps",
281
+ )
282
+ self.parser.add_argument(
283
+ "--lr_scheduler.decay_ratio",
284
+ type=float,
285
+ default=None,
286
+ help="""
287
+ Controls the proportion of the training steps allocated to the learning rate decay phase.
288
+
289
+ If `None`, the learning rate will begin decaying immediately after the warmup period.
290
+ Otherwise, the learning rate will remain stable after the warmup period and
291
+ only start decaying during the last `decay_ratio` portion of the total training steps.
292
+
293
+ This is known as the Warmup-Stable-Decay (WSD) schedule, as described in https://arxiv.org/abs/2404.06395.
294
+ """,
295
+ )
296
+ self.parser.add_argument(
297
+ "--lr_scheduler.decay_type",
298
+ type=str,
299
+ default="linear",
300
+ choices=["linear", "sqrt", "cosine"],
301
+ help="""
302
+ Learning rate decay type to use during training:
303
+ - 'linear': linearly decays learning rate from initial to final value
304
+ - 'sqrt': decays learning rate following a 1 minus square root curve
305
+ - 'cosine': smoothly decays learning rate following a cosine curve
306
+ """,
307
+ )
308
+ self.parser.add_argument(
309
+ "--lr_scheduler.lr_min",
310
+ type=float,
311
+ default=0.0,
312
+ help="""
313
+ Min lr ratio for lr scheduler.
314
+
315
+ If provided, the range of decay factor is scaled from 1 to `lr_min`
316
+ to ensure the learning rate does not drop below `optimizer.lr * lr_scheduler.lr_min`.
317
+ """,
318
+ )
319
+
320
+ # training configs
321
+ self.parser.add_argument(
322
+ "--training.dataset", type=str, default="c4_test", help="Dataset to use"
323
+ )
324
+ self.parser.add_argument(
325
+ "--training.dataset_path",
326
+ type=str,
327
+ help="""
328
+ Path to the dataset in the file system. If provided, data will be
329
+ loaded from this path instead of downloaded.""",
330
+ )
331
+ self.parser.add_argument(
332
+ "--training.batch_size", type=int, default=8, help="Batch size"
333
+ )
334
+ self.parser.add_argument(
335
+ "--training.seq_len", type=int, default=2048, help="Sequence length"
336
+ )
337
+ self.parser.add_argument(
338
+ "--training.max_norm",
339
+ type=Union[float, int],
340
+ default=1.0,
341
+ help="Max norm for gradient clipping",
342
+ )
343
+ self.parser.add_argument(
344
+ "--training.steps",
345
+ type=int,
346
+ default=10000,
347
+ help="How many train steps to run",
348
+ )
349
+ self.parser.add_argument(
350
+ "--training.enable_cpu_offload",
351
+ action="store_true",
352
+ help="""
353
+ Whether to apply CPU offloading of parameters, gradients, and optimizer states in FSDP""",
354
+ )
355
+ self.parser.add_argument(
356
+ "--training.mixed_precision_param",
357
+ type=str,
358
+ default="bfloat16",
359
+ choices=["bfloat16", "float32"],
360
+ help="""
361
+ torch dtype to use for parameters when applying mixed precision via FSDP.
362
+ This feature only takes effect when data_parallel_shard_degree > 1
363
+ """,
364
+ )
365
+ self.parser.add_argument(
366
+ "--training.mixed_precision_reduce",
367
+ type=str,
368
+ default="float32",
369
+ choices=["float32"],
370
+ help="""
371
+ torch dtype to use for reductions when applying mixed precision via FSDP.
372
+ This feature only takes effect when data_parallel_shard_degree > 1
373
+ """,
374
+ )
375
+ self.parser.add_argument(
376
+ "--training.compile",
377
+ action="store_true",
378
+ help="Whether to compile the model",
379
+ )
380
+ self.parser.add_argument(
381
+ "--training.gc_freq",
382
+ type=int,
383
+ default=50,
384
+ help="Python garbage collection scheduling interval, in steps",
385
+ )
386
+ self.parser.add_argument(
387
+ "--training.seed",
388
+ type=int,
389
+ default=None,
390
+ help="Choose the base RNG seed used for training",
391
+ )
392
+ self.parser.add_argument(
393
+ "--training.deterministic",
394
+ action="store_true",
395
+ help="Use deterministic algorithms wherever possible, may be slower",
396
+ )
397
+
398
+ # parallelism configs
399
+ self.parser.add_argument(
400
+ "--parallelism.data_parallel_replicate_degree",
401
+ type=int,
402
+ default=1,
403
+ help="""
404
+ The `data_parallel_replicate_degree` argument specifies the degree of
405
+ data parallelism for weight replication. When this value is greater
406
+ than 1, weights will be replicated across `data_parallel_replicate_degree`
407
+ ranks. If `data_parallel_shard_degree` is also greater than 1, the parallelism
408
+ method used is HSDP (Hybrid Sharded Data Parallelism). Otherwise, the
409
+ parallelism method used is DDP (Distributed Data Parallelism).
410
+ 1 means disabled.""",
411
+ )
412
+ self.parser.add_argument(
413
+ "--parallelism.enable_compiled_autograd",
414
+ action="store_true",
415
+ help="Enable CompiledAutograd to compile the backward.",
416
+ )
417
+ self.parser.add_argument(
418
+ "--parallelism.data_parallel_shard_degree",
419
+ type=int,
420
+ default=-1,
421
+ help="""
422
+ The `data_parallel_shard_degree` argument specifies the degree of data
423
+ parallelism for weight sharding. When this value is greater than 1, weights
424
+ will be sharded across `data_parallel_shard_degree` ranks. If
425
+ `data_parallel_replicate_degree` is also greater than 1, the parallelism
426
+ method used is HSDP (Hybrid Sharded Data Parallelism). Otherwise, the
427
+ parallelism method used is FSDP (Fully Sharded Data Parallelism).
428
+
429
+ -1 means leftover ranks will be used (After DP_REPLICATE/SP/PP). Note that
430
+ only `data_parallel_shard_degree` can be negative. 1 means disabled.""",
431
+ )
432
+ self.parser.add_argument(
433
+ "--parallelism.fsdp_reshard_after_forward",
434
+ type=str,
435
+ default="default",
436
+ choices=["default", "always", "never"],
437
+ help="""
438
+ `reshard_after_forward` specifies the policy for applying `reshard_after_forward`
439
+ within an FSDP setup. `reshard_after_forward` controls parameter behavior after forward,
440
+ trading off memory and communication. See torch's `fully_shard` API for more documentation
441
+ on `reshard_after_forward`.
442
+ The supported policies include "default", "always" and "never":
443
+ - "default" applies default resharding behavior, implementing "smart defaults" for known optimal
444
+ scenarios.
445
+ - "always" will enable `reshard_after_forward` for all forward passes.
446
+ - "never" will disable `reshard_after_forward` for all forward passes.
447
+ """,
448
+ )
449
+ self.parser.add_argument(
450
+ "--parallelism.tensor_parallel_degree",
451
+ type=int,
452
+ default=1,
453
+ help="Tensor Parallelism degree. 1 means disabled.",
454
+ )
455
+ self.parser.add_argument(
456
+ "--parallelism.disable_loss_parallel",
457
+ action="store_true",
458
+ help="Whether to apply loss parallel when sequence parallel is enabled",
459
+ )
460
+ self.parser.add_argument(
461
+ "--parallelism.enable_async_tensor_parallel",
462
+ action="store_true",
463
+ help="Whether to apply async tensor parallel (currently only effective when compile is enabled)",
464
+ )
465
+ self.parser.add_argument(
466
+ "--parallelism.pipeline_parallel_degree",
467
+ type=int,
468
+ default=1,
469
+ help="""
470
+ Pipeline Parallelism degree, or number of ranks. 1 means disabled.
471
+ If using looped schedules, this still specifies the number of physical ranks, not the number
472
+ of stages. Stages per rank are inferred from split points degree, and schedule.""",
473
+ )
474
+ self.parser.add_argument(
475
+ "--parallelism.pipeline_parallel_split_points",
476
+ type=string_list,
477
+ nargs="+",
478
+ default=[],
479
+ help="""
480
+ Specify comma-separated names of modules to use as the beginning of a split point.
481
+
482
+ e.g. "layers.0,layers.2" will cause the model to be split into 3 stages,
483
+ the first containing all the layers up to layers.0,
484
+ the second containing layers.0 and up to layers.2,
485
+ the third containing layers.2 and all the remaining layers.
486
+
487
+ Note: fully-automated splitting may be enabled in the future,
488
+ but currently the split points must be specified manually.""",
489
+ )
490
+ self.parser.add_argument(
491
+ "--parallelism.pipeline_parallel_layers_per_stage",
492
+ type=int,
493
+ default=None,
494
+ help="""
495
+ The number of layers per stage. If specified, the split points will be calculated from
496
+ the number of layers and pipeline_parallel_degree. If not specified, the layers per stage will
497
+ be inferred from the model, schedule, and pipeline_parallel_degree.""",
498
+ )
499
+ self.parser.add_argument(
500
+ "--parallelism.pipeline_parallel_schedule",
501
+ type=str,
502
+ default="1F1B",
503
+ help="""
504
+ Specify the Pipeline Parallel schedule to use. The supported schedules are:
505
+ https://github.com/pytorch/pytorch/blob/de4c2a3b4e89d96334dc678d1c3f2ae51a6630a0/torch/distributed/pipelining/schedules.py#L2161.
506
+ The schedule must be compatible with the split points and stages_per_rank.
507
+
508
+ Looped schedules (e.g. Interleaved1F1B) require specifying pipeline_parallel_degree = number of ranks,
509
+ and split_points = number of stages - 1
510
+ """,
511
+ )
512
+ self.parser.add_argument(
513
+ "--parallelism.pipeline_parallel_schedule_csv",
514
+ type=str,
515
+ default="",
516
+ help="""
517
+ Specify the path to the pipeline parallel schedule csv file to use.
518
+ The pipeline_parallel_schedule argument must be either
519
+ PipelineScheduleSingle, PipelineScheduleMulti, or _PipelineScheduleRuntime.
520
+ """,
521
+ )
522
+ self.parser.add_argument(
523
+ "--parallelism.pipeline_parallel_microbatch_size",
524
+ type=int,
525
+ default=1,
526
+ help="""
527
+ The size of each pipeline parallel microbatch (default 1).
528
+
529
+ This value is used to compute the total number of microbatches by dividing batch_size by
530
+ pipeline_parallel_microbatch_size.
531
+
532
+ The global training batch size must be evenly divisible by pipeline_parallel_microbatch_size.
533
+ """,
534
+ )
535
+ self.parser.add_argument(
536
+ "--parallelism.context_parallel_degree",
537
+ type=int,
538
+ default=1,
539
+ help="Context parallelism degree. 1 means disabled.",
540
+ )
541
+ self.parser.add_argument(
542
+ "--parallelism.context_parallel_rotate_method",
543
+ type=str,
544
+ default="allgather",
545
+ help="""
546
+ The collective to use in context parallel SDPA for kv shards exchange.
547
+
548
+ 'allgather' means to all-gather all kv shards on ranks after the first sub-SDPA computation,
549
+
550
+ 'alltoall' means to all-to-all shuffle the kv shards.
551
+
552
+ The default value is 'allgather'.
553
+ """,
554
+ )
555
+
556
+ # checkpointing configs
557
+ self.parser.add_argument(
558
+ "--checkpoint.enable_checkpoint",
559
+ action="store_true",
560
+ help="Whether to enable checkpoint",
561
+ )
562
+ self.parser.add_argument(
563
+ "--checkpoint.folder",
564
+ type=str,
565
+ default="checkpoint",
566
+ help="""
567
+ The folder to store the checkpoints.
568
+ When enable_checkpoint is set to true, checkpoints will be in {--job.dump_folder}/{--checkpoint.folder}.
569
+ """,
570
+ )
571
+ self.parser.add_argument(
572
+ "--checkpoint.interval",
573
+ type=int,
574
+ default=500,
575
+ help="Checkpointing interval in steps.",
576
+ )
577
+ self.parser.add_argument(
578
+ "--checkpoint.model_weights_only",
579
+ action="store_true",
580
+ help="""
581
+ When model_weights_only=True, only model weights will be saved at the end of training.
582
+ With this, checkpoints can be loaded using `torch.load(..., weights_only=True)` after conversion.
583
+ When model_weights_only=False, the full checkpoint will be saved.
584
+ A full checkpoint includes model, optimizer and train_state, which can be used to resume training.
585
+ The default value is false.
586
+ """,
587
+ )
588
+ self.parser.add_argument(
589
+ "--checkpoint.export_dtype",
590
+ type=str,
591
+ default="float32",
592
+ choices=["float16", "bfloat16", "float32"],
593
+ help="""
594
+ Converts to the specified precision when training completes and model_weights_only=true.
595
+ Currently supports float32, float16, and bfloat16.
596
+ The default value is float32.
597
+ """,
598
+ )
599
+ self.parser.add_argument(
600
+ "--checkpoint.create_seed_checkpoint",
601
+ action="store_true",
602
+ help="""
603
+ Initializes the full model without applying parallelisms, and then saves it as a seed checkpoint.
604
+ Note: requires user to call train.py without specifying any parallelisms, e.g. NGPU=1.
605
+ Could be implemented as a separate script, but this way shares more code.
606
+ """,
607
+ )
608
+ self.parser.add_argument(
609
+ "--checkpoint.async_mode",
610
+ type=str,
611
+ default="disabled",
612
+ help="""
613
+ Which async checkpoint mode to use. Currently there are 3 different modes.
614
+ 1. "disabled": synchronized checkpointing will be used.
615
+ 2. "async": torch.distributed.checkpoint.async_save will be used.
616
+ 3. "async_with_pinned_mem": this option utilizes a dedicated pinned memory
617
+ space and creates a separate process for faster GPU->CPU transfer
618
+ performance and eliminating GIL contention. The cost is increased CPU
619
+ memory usage. If insufficient CPU memory is available, performance may
620
+ degrade due to memory paging. For most users, "async" should suffice as
621
+ the performance overhead is typically small (on the order of tens of
622
+ seconds) compared to checkpointing frequency. This mode can be employed
623
+ to pursue near-zero checkpointing times (e.g., < 1 second) given
624
+ appropriate hardware support such as ample CPU memory and fast PCIe.
625
+
626
+ "disabled" is the default mode.
627
+ """,
628
+ )
629
+ self.parser.add_argument(
630
+ "--checkpoint.keep_latest_k",
631
+ type=int,
632
+ default=10,
633
+ help="""
634
+ Keeps only the latest k checkpoints, purging older ones. If 0, keep all checkpoints.
635
+ K cannot be 1 as the last one may be in the process of being saved. As a result,
636
+ the metadata of the last one may not be ready yet. The default value is 10 to avoid
637
+ filling up the disk.
638
+ """,
639
+ )
640
+ self.parser.add_argument(
641
+ "--checkpoint.load_step",
642
+ type=int,
643
+ default=-1,
644
+ help="Load the checkpoint at the specified step. If -1, load the latest checkpoint.",
645
+ )
646
+ self.parser.add_argument(
647
+ "--checkpoint.exclude_from_loading",
648
+ type=string_list,
649
+ nargs="*",
650
+ default=[],
651
+ help="""
652
+ Exclude specific keys from being loaded from the checkpoint.
653
+ Provide a comma-separated list of keys to exclude, e.g. 'optimizer,lr_scheduler,dataloader'.
654
+ This will load the model only, excluding the specified keys.
655
+ """,
656
+ )
657
+
658
+ # activation checkpointing configs
659
+ self.parser.add_argument(
660
+ "--activation_checkpoint.mode",
661
+ type=str,
662
+ default="selective",
663
+ help="Type of activation checkpointing to use ['none', 'full', 'selective']",
664
+ )
665
+ self.parser.add_argument(
666
+ "--activation_checkpoint.selective_ac_option",
667
+ type=str,
668
+ default="2", # 2 = checkpoint every other layer
669
+ help="""
670
+ Selective activation checkpointing options ['int', 'op'].
671
+ 'int' (e.g., 2) for every nth layer, or 'op' for op level ac.
672
+ """,
673
+ )
674
+
675
+ # float8 configs
676
+ self.parser.add_argument(
677
+ "--float8.enable_fsdp_float8_all_gather",
678
+ action="store_true",
679
+ help="Whether to enable float8 all-gather in FSDP, recommended for tensorwise scaling",
680
+ )
681
+ self.parser.add_argument(
682
+ "--float8.precompute_float8_dynamic_scale_for_fsdp",
683
+ action="store_true",
684
+ help="Whether to precompute float8 scales dynamically for FSDP, recommended for tensorwise scaling",
685
+ )
686
+ self.parser.add_argument(
687
+ "--float8.force_recompute_fp8_weight_in_bwd",
688
+ action="store_true",
689
+ help="""
690
+ Whether to force the recomputation of FP8 weights during backward pass.
691
+ When using FSDP with tensorwise scaling, it is recommended to enable
692
+ `force_recompute_fp8_weight_in_bwd` to prevent saving unsharded FP8 weights
693
+ for backward computation.
694
+ """,
695
+ )
696
+ self.parser.add_argument(
697
+ "--float8.recipe_name",
698
+ type=str,
699
+ default=None,
700
+ choices=["tensorwise", "rowwise", "rowwise_with_gw_hp"],
701
+ help="""
702
+ If specified, creates float8 config from recipe name, valid choices are
703
+ `tensorwise`, `rowwise` and `rowwise_with_gw_hp`.
704
+ """,
705
+ )
706
+ self.parser.add_argument(
707
+ "--float8.filter_fqns",
708
+ type=string_list,
709
+ default=[],
710
+ nargs="+",
711
+ help="""
712
+ Comma-separated list of fully qualified names of modules to skip applying float8 training to.
713
+ nn.Linear modules with any dim size not divisible by 16 are always skipped due to hardware requirements.
714
+ Example: --float8.module_filter_fqns "attention.wq,attention.wk,attention.wv,output"
715
+ """,
716
+ )
717
+
718
+ # communications library settings
719
+ self.parser.add_argument(
720
+ "--comm.init_timeout_seconds",
721
+ type=int,
722
+ default=300,
723
+ help="Timeout for communication operations, during initialization and first train step.",
724
+ )
725
+ self.parser.add_argument(
726
+ "--comm.train_timeout_seconds",
727
+ type=int,
728
+ default=100,
729
+ help=(
730
+ "Timeout for communication operations after the first train step -- "
731
+ "usually a tighter bound than during initialization."
732
+ ),
733
+ )
734
+ self.parser.add_argument(
735
+ "--comm.trace_buf_size",
736
+ type=int,
737
+ default=20000,
738
+ help="Flight recorder ring buffer size, >0 means recording by default, 0 means disabled",
739
+ )
740
+
741
+ # memory estimation configs
742
+ self.parser.add_argument(
743
+ "--memory_estimation.enabled",
744
+ help="Whether to estimate memory usage for FSDP",
745
+ action="store_true",
746
+ )
747
+
748
+ self.parser.add_argument(
749
+ "--memory_estimation.disable_fake_mode",
750
+ help="Whether to estimate memory under FakeTensorMode",
751
+ action="store_true",
752
+ )
753
+
754
+ self.parser.add_argument(
755
+ "--fault_tolerance.enable",
756
+ action="store_true",
757
+ help="""
758
+ Enable TorchFT integration. When TorchFT is enabled, HSDP will be used.
759
+ And --fault_tolerance.data_parallel_replicate_degree should be 1 and
760
+ --fault_tolerance.group_size will be used to control the maximum
761
+ replicate group size as the replicate group size is dynamic.
762
+
763
+ Note that this is still an experimental feature.
764
+ """,
765
+ )
766
+
767
+ # torchft configs
768
+ self.parser.add_argument(
769
+ "--fault_tolerance.replica_id",
770
+ type=int,
771
+ default=0,
772
+ help="The TorchFT replica ID of this run.",
773
+ )
774
+ self.parser.add_argument(
775
+ "--fault_tolerance.group_size",
776
+ type=int,
777
+ default=0,
778
+ help="""
779
+ The number of TorchFT replicate groups. This number will be used for
780
+ dataloader to split the dataset across the replicate groups and FSDP
781
+ dimension
782
+ """,
783
+ )
784
+ self.parser.add_argument(
785
+ "--fault_tolerance.min_replica_size",
786
+ type=int,
787
+ default=1,
788
+ help="The minimum number of FT replica for each step.",
789
+ )
790
+
791
+ self.parser.add_argument(
792
+ "--experimental.custom_import",
793
+ type=str,
794
+ default="",
795
+ help="""
796
+ This option enables importing external modules.
797
+ Currently, it only supports dotted import modules (e.g., some_package.model_x).
798
+ It is the user's responsibility to ensure that the specified path can be
799
+ successfully imported. One way to achieve this is to place your module
800
+ inside the ``torchtitan/torchtitan`` folder and execute ``pip install -e .`` to
801
+ make it available for import.
802
+ """,
803
+ )
804
+
805
+ self.parser.add_argument(
806
+ "--experimental.custom_args_module",
807
+ type=str,
808
+ default="",
809
+ help="""
810
+ This option allows users to extend TorchTitan's existing JobConfig by importing
811
+ a customized module. Similar to ``--experimental.custom_model_path``, the user
812
+ needs to ensure that the path can be imported. The module should contain exactly
813
+ one public function and the function has the signature
814
+ given parser to add new arguments by calling ``parser.add_argument`` as needed.
815
+ given parser to add new argument by calling``parser.add_argument``, as wish.
816
+ """,
817
+ )
818
+
819
+ self._is_parsed = False
820
+ self._allow_unkown_args = False
821
+
822
+ def maybe_add_custom_args(self) -> None:
823
+ """Add custom arguments to the parser if --experimental.custom_args_module is set.
824
+
825
+ Note: This function should be called before the parser is used to parse arguments.
826
+ """
827
+ if self._is_parsed:
828
+ raise RuntimeError(
829
+ "JobConfig has already been parsed. We could not add new arguments."
830
+ )
831
+
832
+ self._allow_unkown_args = True
833
+ self.parse_args(sys.argv[1:])
834
+ self._allow_unkown_args = False
835
+
836
+ if self.experimental.custom_args_module:
837
+ module = importlib.import_module(self.experimental.custom_args_module)
838
+ public_functions = [
839
+ name
840
+ for name, func in inspect.getmembers(module)
841
+ if inspect.isfunction(func) and not name.startswith("_")
842
+ ]
843
+ func = getattr(module, public_functions[0])
844
+ func(self.parser)
845
+
846
+ def to_dict(self):
847
+ return self.args_dict
848
+
849
+ def parse_args(self, args_list: list = sys.argv[1:]):
850
+ self._is_parsed = True
851
+ args, cmd_args = self.parse_args_from_command_line(args_list)
852
+ config_file = getattr(args, "job.config_file", None)
853
+ # build up a two level dict
854
+ args_dict = self._args_to_two_level_dict(args)
855
+ if config_file is not None:
856
+ try:
857
+ with open(config_file, "rb") as f:
858
+ for k, v in tomllib.load(f).items():
859
+ # to prevent overwrite of non-specified keys
860
+ args_dict[k] |= v
861
+ except (FileNotFoundError, tomllib.TOMLDecodeError) as e:
862
+ logger.exception(
863
+ f"Error while loading the configuration file: {config_file}"
864
+ )
865
+ logger.exception(f"Error details: {str(e)}")
866
+ raise e
867
+
868
+ # Checking string-list arguments are properly split into a list
869
+ # if split-points came from 'args' (from cmd line) it would have already been parsed into a list by that parser
870
+ string_list_argnames = self._get_string_list_argument_names()
871
+ for n in string_list_argnames:
872
+ check_string_list_argument(args_dict, n)
873
+
874
+ # override args dict with cmd_args
875
+ cmd_args_dict = self._args_to_two_level_dict(cmd_args)
876
+ for section, section_args in cmd_args_dict.items():
877
+ for k, v in section_args.items():
878
+ args_dict[section][k] = v
879
+
880
+ self.args_dict = args_dict
881
+
882
+ for k, v in args_dict.items():
883
+ class_type = type(k.title(), (), v)
884
+ setattr(self, k, class_type())
885
+ self._validate_config()
886
+
887
+ def _args_to_two_level_dict(self, args: argparse.Namespace) -> defaultdict:
888
+ args_dict = defaultdict(defaultdict)
889
+ for k, v in vars(args).items():
890
+ first_level_key, second_level_key = k.split(".", 1)
891
+ args_dict[first_level_key][second_level_key] = v
892
+ return args_dict
893
+
894
+ def _validate_config(self) -> None:
895
+ # TODO: temporary mitigation of BC breaking change in
896
+ # tokenizer default path, need to remove later
897
+ if not os.path.exists(self.model.tokenizer_path):
898
+ logger.warning(
899
+ f"Tokenizer path {self.model.tokenizer_path} does not exist!"
900
+ )
901
+ old_tokenizer_path = (
902
+ "torchtitan/datasets/tokenizer/original/tokenizer.model"
903
+ )
904
+ if os.path.exists(old_tokenizer_path):
905
+ self.model.tokenizer_path = old_tokenizer_path
906
+ logger.warning(
907
+ f"Temporarily switching to previous default tokenizer path {old_tokenizer_path}. "
908
+ "Please update your config."
909
+ )
910
+
911
+ def _get_string_list_argument_names(self) -> list[str]:
912
+ """Get the parser argument names of type `string_list`."""
913
+ string_list_args = [
914
+ v.dest for v in self.parser._actions if v.type is string_list
915
+ ]
916
+ return string_list_args
917
+
918
+ def parse_args_from_command_line(
919
+ self, args_list
920
+ ) -> Tuple[argparse.Namespace, argparse.Namespace]:
921
+ """
922
+ Parse command line arguments and return the parsed args and the command line only args
923
+ """
924
+ if self._allow_unkown_args:
925
+ args, _ = self.parser.parse_known_args(args_list)
926
+ else:
927
+ args = self.parser.parse_args(args_list)
928
+ string_list_argnames = set(self._get_string_list_argument_names())
929
+
930
+ # aux parser to parse the command line only args, with no defaults from main parser
931
+ aux_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
932
+ for arg, val in vars(args).items():
933
+ if isinstance(val, bool):
934
+ aux_parser.add_argument(
935
+ "--" + arg, action="store_true" if val else "store_false"
936
+ )
937
+ elif arg in string_list_argnames:
938
+ # without this special case, type inference breaks here,
939
+ # since the inferred type is just 'list' and it ends up flattening
940
+ # e.g. from ["layers.0", "layers.1"] into ["l", "a", "y", "e", "r", "s", ".0", ...]
941
+ aux_parser.add_argument("--" + arg, type=string_list)
942
+ else:
943
+ aux_parser.add_argument("--" + arg, type=type(val))
944
+
945
+ cmd_args, _ = aux_parser.parse_known_args(args_list)
946
+
947
+ return args, cmd_args
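A hedged sketch of the precedence order documented in the `JobConfig` docstring (cmdline > toml > argparse default); the toml file name and flavor value below are hypothetical:

```python
# Assuming ./train_config.toml contains:
#   [model]
#   name = "llama3"
#   flavor = "debugmodel"
config = JobConfig()
config.parse_args([
    "--job.config_file", "./train_config.toml",
    "--model.flavor", "llama3_8b",      # command line overrides the toml value
])
print(config.model.name)    # "llama3"     -> from the toml file
print(config.model.flavor)  # "llama3_8b"  -> from the command line
print(config.optimizer.lr)  # 0.0008       -> argparse default (neither toml nor cmdline set it)
```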
torchtitan/experiments/README.md ADDED
@@ -0,0 +1,20 @@
1
+ To accelerate contributions to and innovations around `torchtitan`, we are adding this new, experimental folder. Below are the general contributing guidelines, and we look forward to your contributions!
2
+
3
+ ## Contributing Guidelines
4
+
5
+ We provide this `experiments/` folder to host experiments that add significant value to `torchtitan`, with the following principles. We refer to the part of `torchtitan` outside `experiments` as `core`.
6
+ 1. Each subfolder in `experiments` will be an experiment, with a clear theme which can be flexible, such as
7
+ - a new model, or preferably a new model architecture, with its training infrastructure including parallelization functions;
8
+ - an enhancement or addition to the existing infrastructure of `torchtitan`.
9
+ 2. It is the contributors' responsibility to justify the value of an experiment. `torchtitan` team will review proposals on a case-by-case basis. As part of the contribution, the contributors should provide documentation that clearly showcases the motivation and innovation of an experiment, including reports on performance and loss convergence.
10
+ 3. An experiment should reuse existing `torchtitan` code as much as possible, such as modules in [`components/`](../components/) (via a new [`TrainSpec`](../protocols/train_spec.py)) and [`train.py`](../train.py). For a list of extension points we provide, please refer to [docs/extension.md](../../docs/extension.md).
11
+ - The extension points are subject to change. We kindly request that contributors provide feedback if they encounter issues reusing any components, rather than simply using a copy-and-paste approach.
12
+ - The degree to which existing components are reused and whether duplications are legit will also be a criteria of whether an experiment would be accepted.
13
+ 4. Each experiment is independent from other experiments, and can have its own dependencies (on top of [core dependencies](../../requirements.txt)), and its own tests.
14
+ 5. The dependency from `experiments` to `core` is one-way. Anything in `experiments` is optional for `core` to run successfully. In particular, development in `core` is not blocked by breakage in `experiments`. We will utilize GitHub's [CI mechanism](https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#onpushpull_requestpull_request_targetpathspaths-ignore) to help test an experiment periodically and only if the experiment itself is affected by a PR.
15
+ 6. Each experiment needs to have an owner. The owner is responsible to work with `torchtitan` team to maintain the quality and healthiness of an experiment, which includes
16
+ - adapting an experiment to changes in `core` and fix broken tests, no later than the next official `torchtitan` release;
17
+ - responding to GitHub issues and questions in a timely manner.
18
+ 7. `torchtitan` team reserve the right to remove an experiment. In particular, an experiment should be removed if
19
+ - it has served its purpose (e.g., providing findings, or getting some features upstreamed to `core` or PyTorch, etc.), or
20
+ - it gets stale (e.g. not being maintained).
torchtitan/experiments/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torchtitan.experiments.llama4 # noqa: F401
8
+ import torchtitan.experiments.simple_fsdp # noqa: F401
torchtitan/experiments/llama4/README.md ADDED
@@ -0,0 +1,29 @@
1
+ **The Llama 4 folder is still under development.**
2
+
3
+ #### Available features
4
+ - Llama 4 model definition (text-only), including the MoE architecture with token-choice routing using efficient bfloat16 Grouped MM kernels
5
+ - FSDP, TP, PP, CP support
6
+ - DCP checkpoint conversion scripts
7
+
8
+ #### Download Llama 4 tokenizer
9
+ ```bash
10
+ # Llama 4 tokenizer.model
11
+ python scripts/download_tokenizer.py --repo_id meta-llama/Llama-4-Scout-17B-16E --tokenizer_path "" --hf_token=...
12
+ ```
13
+
14
+ #### To be added
15
+ - Modeling
16
+ - iRoPE implementation
17
+ - load balance loss for token-choice MoE
18
+ - alternative expert-choice MoE
19
+ - multimodal support
20
+ - Parallelism
21
+ - Context Parallel support for FlexAttention, iRoPE, and multimodal inputs
22
+ - Expert Parallel support
23
+ - torch.compile
24
+ - for MoE layers
25
+ - Quantization
26
+ - efficient float8 GroupedGEMM kernels (from torchao)
27
+ - Testing
28
+ - performance and loss convergence tests
29
+ - CI integration
torchtitan/experiments/llama4/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from torchtitan.components.loss import build_cross_entropy_loss
8
+ from torchtitan.components.lr_scheduler import build_lr_schedulers
9
+ from torchtitan.components.optimizer import build_optimizers
10
+ from torchtitan.datasets.hf_datasets import build_hf_dataloader
11
+ from torchtitan.datasets.tokenizer.tiktoken import build_tiktoken_tokenizer
12
+ from torchtitan.models.llama3 import pipeline_llama
13
+ from torchtitan.protocols.train_spec import register_train_spec, TrainSpec
14
+
15
+ from .infra.parallelize_llama import parallelize_llama
16
+ from .model.args import TransformerModelArgs
17
+ from .model.model import Transformer
18
+
19
+ __all__ = [
20
+ "TransformerModelArgs",
21
+ "Transformer",
22
+ "llama4_configs",
23
+ ]
24
+
25
+
26
+ llama4_configs = {
27
+ "debugmodel": TransformerModelArgs(
28
+ dim=256,
29
+ n_layers=8,
30
+ n_heads=16,
31
+ rope_theta=500000,
32
+ ),
33
+ "17bx16e": TransformerModelArgs(
34
+ dim=5120,
35
+ n_layers=48,
36
+ n_heads=40,
37
+ n_kv_heads=8,
38
+ ffn_dim_multiplier=1.2,
39
+ multiple_of=2048,
40
+ rope_theta=500000,
41
+ num_experts=16,
42
+ interleave_moe_layer_step=1,
43
+ ),
44
+ "17bx128e": TransformerModelArgs(
45
+ dim=5120,
46
+ n_layers=48,
47
+ n_heads=40,
48
+ n_kv_heads=8,
49
+ ffn_dim_multiplier=1.2,
50
+ multiple_of=2048,
51
+ rope_theta=500000,
52
+ num_experts=128,
53
+ ),
54
+ }
55
+
56
+
57
+ register_train_spec(
58
+ TrainSpec(
59
+ name="llama4",
60
+ cls=Transformer,
61
+ config=llama4_configs,
62
+ parallelize_fn=parallelize_llama,
63
+ pipelining_fn=pipeline_llama,
64
+ build_optimizers_fn=build_optimizers,
65
+ build_lr_schedulers_fn=build_lr_schedulers,
66
+ build_dataloader_fn=build_hf_dataloader,
67
+ build_tokenizer_fn=build_tiktoken_tokenizer,
68
+ build_loss_fn=build_cross_entropy_loss,
69
+ )
70
+ )
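A hedged sketch of how the registration above is typically consumed; the `get_train_spec` lookup and the direct constructor call are assumptions modeled on the llama3 flow, not code from this diff:

```python
from torchtitan.protocols.train_spec import get_train_spec  # assumed lookup helper

# A run configured with --model.name llama4 --model.flavor 17bx16e would resolve to:
spec = get_train_spec("llama4")          # registered via register_train_spec above
model_args = spec.config["17bx16e"]      # flavor selected by --model.flavor
model = spec.cls(model_args)             # assumed: Transformer takes its args directly
```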