# Copyright 2024 state-spaces/mamba2 org and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MAMBA2 model."""

import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.generation import GenerationMixin
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.models.mamba2.configuration_mamba2 import Mamba2Config
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, RMSNorm
from fla.modules.layernorm_gated import RMSNormGated

logger = logging.get_logger(__name__)

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    try:
        from mamba_ssm.ops.triton.selective_state_update import selective_state_update
        from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
    except ImportError:
        (
            selective_state_update,
            mamba_chunk_scan_combined,
            mamba_split_conv1d_scan_combined,
        ) = (None, None, None)
    try:
        from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
    except ImportError:
        causal_conv1d_update, causal_conv1d_fn = None, None

is_fast_path_available = all((
    selective_state_update,
    causal_conv1d_fn,
    causal_conv1d_update
))


def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
    """
    Pad `input_tensor` with `pad_size` zeros on the seq_len dim (dim=1).

    Assumes the tensor is of rank 3 or 4.
    """
    pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)

    return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)


def reshape_into_chunks(input_tensor, pad_size, chunk_size):
    """
    Pad `input_tensor` with `pad_size` on the seq_len dim (dim=1) and simultaneously split it into chunk sequences.

    Assumes the tensor is of rank 3 or 4.
    """
    # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
    input_tensor = pad_tensor_by_size(input_tensor, pad_size)

    if len(input_tensor.shape) == 3:
        # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
        return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
    else:
        # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] ->
        # [bsz, -1, chunk_size, num_heads, head_dim or state_size]
        return input_tensor.reshape(
            input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
        )


def segment_sum(input_tensor):
    """
    More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
    """
    chunk_size = input_tensor.size(-1)
    # 1. expand input tensor to have an additional dimension and repeat along that dimension
    # [..., chunk_size] -> [..., chunk_size, chunk_size]
    input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
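    # For a 1-D slice `x` of length `chunk_size`, the result below satisfies
    #   segment_sum(x)[i, j] = x[j + 1] + ... + x[i]   for i >= j (0 when i == j)
    #   segment_sum(x)[i, j] = -inf                    for i <  j
    # so `torch.exp(segment_sum(A))` is the lower-triangular decay matrix used as the
    # causal-mask analogue in the chunked SSD scan further below.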
    # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag
    mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
    input_tensor = input_tensor.masked_fill(~mask, 0)
    # 3. compute actual cumsum
    tensor_segsum = torch.cumsum(input_tensor, dim=-2)

    # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
    mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
    tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
    return tensor_segsum


def apply_mask_to_padding_states(hidden_states, attention_mask):
    """
    Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
    """
    if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
        dtype = hidden_states.dtype
        hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)

    return hidden_states


class Mamba2Cache:
    """
    Arguments:
        config: Mamba2Config
        batch_size: int
        dtype: torch.dtype
        device: torch.device

    Attributes:
        dtype: (`torch.dtype`):
            The default `dtype` used to initialize the cache.
        conv_kernel_size: (`int`):
            Model's convolution kernel size taken from config.
        n_groups: (`int`):
            Model's number of groups taken from the config - similar to tensor parallel in Transformer.
        state_size: (`int`):
            Model's SSM state size taken from config.
        num_heads: (`int`):
            The number of heads used in the linear attention / SSM.
        head_dim: (`int`):
            The respective dimension of the heads used in the linear attention / SSM.
        intermediate_size: (`int`):
            Model's intermediate_size based on (expand * hidden_dim) from config.
        conv_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, intermediate_size + 2 * n_groups * state_size, conv_kernel_size]`
            that holds convolutional states.
        ssm_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, num_heads, head_dim, state_size]` that holds ssm states.
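
    Example (illustrative sketch; assumes `config` is a `Mamba2Config` with its default fields):

        cache = Mamba2Cache(config, batch_size=2, dtype=torch.float16, device="cuda")
        cache.conv_states.shape  # [num_layers, 2, intermediate_size + 2 * n_groups * state_size, conv_kernel]
        cache.ssm_states.shape   # [num_layers, 2, num_heads, head_dim, state_size]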
""" def __init__( self, config: Mamba2Config, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None, ): self.dtype = dtype self.conv_kernel_size = config.conv_kernel self.n_groups = config.n_groups self.state_size = config.state_size self.num_heads = config.num_heads self.head_dim = config.head_dim self.intermediate_size = int(config.expand * config.hidden_size) self.conv_states = torch.zeros( config.num_hidden_layers, batch_size, self.intermediate_size + 2 * self.n_groups * self.state_size, self.conv_kernel_size, device=device, dtype=dtype, ) self.ssm_states = torch.zeros( config.num_hidden_layers, batch_size, self.num_heads, self.head_dim, self.state_size, device=device, dtype=dtype, ) def update_conv_state( self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool = False ) -> torch.Tensor: if cache_init: self.conv_states[layer_idx] = new_conv_state.to(self.conv_states.device) else: self.conv_states[layer_idx] = self.conv_states[layer_idx].roll(shifts=-1, dims=-1) self.conv_states[layer_idx][:, :, -1] = new_conv_state[:, 0, :].to(self.conv_states.device) return self.conv_states[layer_idx] def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor): self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device) return self.ssm_states[layer_idx] def reset(self): self.conv_states.zero_() self.ssm_states.zero_() class Mamba2Mixer(nn.Module): """ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4, and is why Mamba is called **selective** state spaces) """ def __init__(self, config: Mamba2Config, layer_idx: int): super().__init__() self.num_heads = config.num_heads self.hidden_size = config.hidden_size self.ssm_state_size = config.state_size self.conv_kernel_size = config.conv_kernel self.intermediate_size = int(config.expand * self.hidden_size) self.time_step_rank = int(config.time_step_rank) self.layer_idx = layer_idx self.use_conv_bias = config.use_conv_bias self.activation = config.hidden_act self.act = ACT2FN[config.hidden_act] self.layer_norm_epsilon = config.layer_norm_epsilon self.rms_norm = config.rms_norm self.n_groups = config.n_groups self.head_dim = config.head_dim self.chunk_size = config.chunk_size self.time_step_limit = config.time_step_limit self.time_step_min = config.time_step_min self.time_step_max = config.time_step_max self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size self.conv1d = nn.Conv1d( in_channels=self.conv_dim, out_channels=self.conv_dim, bias=config.use_conv_bias, kernel_size=config.conv_kernel, groups=self.conv_dim, padding=config.conv_kernel - 1, ) # projection of the input hidden states projection_size = self.intermediate_size + self.conv_dim + self.num_heads self.in_proj = nn.Linear( self.hidden_size, projection_size, bias=config.use_bias, ) # selective projection used to make dt, B and C input dependant # time step projection (discretization) # instantiate once and copy inv_dt in init_weights of PretrainedModel self.dt_bias = nn.Parameter(torch.ones(self.num_heads)) # S4D real initialization. These are not discretized! # The core is to load them, compute the discrete states, then write the updated state. 
        A = torch.arange(1, self.num_heads + 1)
        self.A_log = nn.Parameter(torch.log(A))
        self.A_log._no_weight_decay = True
        self.norm = RMSNormGated(
            self.intermediate_size,
            eps=self.layer_norm_epsilon,
            norm_before_gate=False
        )
        self.D = nn.Parameter(torch.ones(self.num_heads))
        self.D._no_weight_decay = True

        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of "
                "`(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. "
                "Falling back to the naive implementation. "
                "To install follow https://github.com/state-spaces/mamba/#installation and "
                "https://github.com/Dao-AILab/causal-conv1d"
            )

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        projected_states = self.in_proj(hidden_states)

        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape
        groups_time_state_size = self.n_groups * self.ssm_state_size
        d_mlp = (
            projected_states.shape[-1]
            - 2 * self.intermediate_size
            - 2 * self.n_groups * self.ssm_state_size
            - self.num_heads
        ) // 2

        # Single step calculations via cache
        if cache_params is not None and cache_position is not None and cache_position[0] > 0:
            _, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
                [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
            )

            # 2. Convolution sequence transformation
            hidden_states_B_C = causal_conv1d_update(
                hidden_states_B_C,
                cache_params.conv_states[self.layer_idx],
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            )

            hidden_states, B, C = torch.split(
                hidden_states_B_C,
                [
                    self.intermediate_size,
                    groups_time_state_size,
                    groups_time_state_size,
                ],
                dim=-1,
            )

            # 3. SSM transformation
            A = -torch.exp(self.A_log.float())  # (nheads,)
            A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            dt = dt[:, :, None].expand(-1, -1, self.head_dim)
            dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
            D = self.D[:, None, ...].expand(-1, self.head_dim)
            B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
            C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
            hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
            hidden_states = selective_state_update(
                cache_params.ssm_states[self.layer_idx],
                hidden_states_reshaped,
                dt,
                A,
                B,
                C,
                D,
                z=None,
                dt_bias=dt_bias,
                dt_softplus=True,
            )
            hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
            hidden_states = self.norm(hidden_states, gate)

            # 4. Final linear projection
            out = self.out_proj(hidden_states)[:, None, ...]

        # Fused calculations or step by step if no initialized cache is found
        else:
            A = -torch.exp(self.A_log.float())  # (num_heads) or (intermediate_size, state_size)
            dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
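            # Two sub-paths follow: during training without a cache, a single fused Triton kernel
            # (`mamba_split_conv1d_scan_combined`) performs the conv1d, the chunked SSM scan, the
            # gated RMSNorm and the output projection in one call; otherwise the steps run
            # separately so the conv and SSM states can be written back into `cache_params`
            # for incremental decoding.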
            # 2-4. Fused kernel for conv1d, SSM, and the final projection
            if self.training and cache_params is None:
                out = mamba_split_conv1d_scan_combined(
                    projected_states,
                    self.conv1d.weight.squeeze(1),
                    self.conv1d.bias,
                    self.dt_bias,
                    A,
                    D=self.D,
                    chunk_size=self.chunk_size,
                    seq_idx=None,  # was seq_idx
                    activation=self.activation,
                    rmsnorm_weight=self.norm.weight,
                    rmsnorm_eps=self.norm.eps,
                    outproj_weight=self.out_proj.weight,
                    outproj_bias=self.out_proj.bias,
                    headdim=self.head_dim,
                    ngroups=self.n_groups,
                    norm_before_gate=False,
                    return_final_states=False,
                    **dt_limit_kwargs,
                )
            else:
                _, _, gate, hidden_states_B_C, dt = projected_states.split(
                    [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
                )

                # 2. Convolution sequence transformation
                # Init cache
                if cache_params is not None:
                    hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                    conv_states = nn.functional.pad(
                        hidden_states_B_C_transposed,
                        (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0),
                    )
                    cache_params.update_conv_state(
                        layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True
                    )

                if self.activation not in ["silu", "swish"]:
                    hidden_states_B_C = self.act(
                        self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)
                    )
                else:
                    hidden_states_B_C = causal_conv1d_fn(
                        x=hidden_states_B_C.transpose(1, 2),
                        weight=self.conv1d.weight.squeeze(1),
                        bias=self.conv1d.bias,
                        activation=self.activation,
                    ).transpose(1, 2)

                hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
                hidden_states, B, C = torch.split(
                    hidden_states_B_C,
                    [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                    dim=-1,
                )

                # 3. SSM transformation
                scan_output, ssm_state = mamba_chunk_scan_combined(
                    hidden_states.view(batch_size, seq_len, -1, self.head_dim),
                    dt,
                    A,
                    B.view(batch_size, seq_len, self.n_groups, -1),
                    C.view(batch_size, seq_len, self.n_groups, -1),
                    chunk_size=self.chunk_size,
                    D=self.D,
                    z=None,
                    seq_idx=None,
                    return_final_states=True,
                    dt_bias=self.dt_bias,
                    dt_softplus=True,
                    **dt_limit_kwargs,
                )

                # Init cache
                if ssm_state is not None and cache_params is not None:
                    cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)

                scan_output = scan_output.view(batch_size, seq_len, -1)

                # Multiply "gate" branch and apply extra normalization layer
                scan_output = self.norm(scan_output, gate)

                # 4. Final linear projection
                out = self.out_proj(scan_output)
        return out

    # fmt: off
    def torch_forward(
        self,
        input_states,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None
    ):
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype

        # 1. Gated MLP's linear projection
        input_states = apply_mask_to_padding_states(input_states, attention_mask)
        projected_states = self.in_proj(input_states)
        d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
        _, _, gate, hidden_states_B_C, dt = projected_states.split(
            [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
        )
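        # Layout of the `in_proj` output along the last dim:
        #   [d_mlp, d_mlp, intermediate_size (gate z), conv_dim (x | B | C), num_heads (dt)]
        # with conv_dim = intermediate_size + 2 * n_groups * state_size. Given the projection
        # size defined in `__init__`, `d_mlp` evaluates to 0, so the first two splits are
        # empty placeholders.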
        # 2. Convolution sequence transformation
        if cache_params is not None and cache_position is not None and cache_position[0] > 0:
            cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False)

            # We need to guarantee that anything regarding the cache is on the same device
            conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)

            hidden_states_B_C = torch.sum(
                conv_states * self.conv1d.weight.squeeze(1), dim=-1
            )
            if self.use_conv_bias:
                hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
            hidden_states_B_C = self.act(hidden_states_B_C)
        else:
            # Init cache
            if cache_params is not None:
                hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                conv_states = nn.functional.pad(
                    hidden_states_B_C_transposed,
                    (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
                )
                cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True)

            hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))

        hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
        hidden_states, B, C = torch.split(
            hidden_states_B_C,
            [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
            dim=-1
        )

        # 3. SSM transformation
        A = -torch.exp(self.A_log.float())  # [num_heads]
        if cache_params is not None and cache_position is not None and cache_position[0] > 0:
            # We need to guarantee that anything regarding the cache is on the same device
            cache_device = cache_params.ssm_states.device

            # Note: there is no need to pad parameter matrices here, as there is just one new token
            # for batched generation
            dt = dt[:, 0, :][:, None, ...]
            dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
            # [num_heads] -> [num_heads, head_dim]
            dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)

            dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            # [bsz, num_heads, head_dim, state_size]
            dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)

            # Discretize B
            # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
            # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
            B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
            B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
            B = B.reshape(batch_size, -1, B.shape[-1])
            # [bsz, num_heads, head_dim, state_size]
            dB = dt[..., None] * B[..., None, :]

            # Discretize x into dB
            # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
            hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
            dBx = (dB * hidden_states[..., None]).to(device=cache_device)

            # State calculation
            cache_params.update_ssm_state(
                layer_idx=self.layer_idx,
                new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx
            )

            # Subsequent output
            # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
            C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
            C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
            C = C.reshape(batch_size, -1, C.shape[-1])
            # [bsz, num_heads, head_dim]

            ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype)  # Shape: [b, h, d, n]
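            # The block above implements one step of the discretized recurrence
            #   h_t = exp(dt * A) * h_{t-1} + (dt * B_t) * x_t
            #   y_t = (C_t · h_t) + D * x_t   (the C_t and D terms are applied just below)
            # broadcast per head and per head_dim, with h_t written back into `cache_params.ssm_states`.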
            # Reshape ssm_states to merge the first two dimensions
            # Shape: [b*h, d, n]
            ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)
            C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)  # Shape: [b*h, n, 1]
            y = torch.bmm(ssm_states_reshaped, C_reshaped)
            y = y.view(batch_size, self.num_heads, self.head_dim)

            # D skip connection
            # [num_heads] -> [num_heads, head_dim]
            D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
            y = (y + hidden_states * D).to(y.dtype)

            # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
            y = y.reshape(batch_size, -1)[:, None, ...]
        else:
            # begin ssd naive implementation without einsums
            dt = nn.functional.softplus(dt + self.dt_bias)
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
            B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
            C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
            pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size

            D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)

            # Discretize x and A
            hidden_states = hidden_states * dt[..., None]
            A = A.to(hidden_states.dtype) * dt

            # Rearrange into blocks/chunks
            hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]

            # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
            A = A.permute(0, 3, 1, 2)
            A_cumsum = torch.cumsum(A, dim=-1)

            # 1. Compute the output for each intra-chunk (diagonal blocks)
            # This is the analog of a causal mask
            L = torch.exp(segment_sum(A))

            # Contraction of C and B to get G (attention-weights like)
            # shape: (b, c, l, s, h, n)
            G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]
            G = G_intermediate.sum(dim=-1)  # shape: (b, c, l, s, h)

            # Compute M, equivalent to applying attention mask to weights
            M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
            M = M_intermediate.sum(dim=-1)

            # Compute Y_diag (apply to values)
            Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)

            # 2. Compute the state for each intra-chunk
            # (right term of low-rank factorization of off-diagonal blocks; B terms)
            decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
            B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
            states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)

            # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
            # (middle term of factorization of off-diag blocks; A terms)
            if cache_params is not None and cache_position is not None and cache_position[0] > 0:
                previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
            else:
                previous_states = torch.zeros_like(states[:, :1])
            states = torch.cat([previous_states, states], dim=1)
            decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
            decay_chunk = decay_chunk.transpose(1, 3)
            new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
            states, ssm_state = new_states[:, :-1], new_states[:, -1]
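            # Recap of the chunked (SSD) decomposition: within a chunk the output is obtained
            # exactly from the quadratic, attention-like form (Y_diag, step 1); contributions
            # from earlier chunks are carried by the per-chunk final states (steps 2-3) and
            # converted back into outputs in step 4, so that y = Y_diag + Y_off.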
            # 4. Compute state -> output conversion per chunk
            # (left term of low-rank factorization of off-diagonal blocks; C terms)
            state_decay_out = torch.exp(A_cumsum)
            C_times_states = (C[..., None, :] * states[:, :, None, ...])
            state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
            Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])

            # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
            y = Y_diag + Y_off
            # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
            y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)

            y = y + D_residual
            # Cutting off padded chunks
            if pad_size > 0:
                y = y[:, :seq_len, :, :]
            y = y.reshape(batch_size, seq_len, -1)

            # Init cache
            if ssm_state is not None and cache_params is not None:
                cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)

        scan_output = self.norm(y, gate)
        # end ssd naive

        # 4. Final linear projection
        contextualized_states = self.out_proj(scan_output.to(dtype))  # [batch, seq_len, hidden_size]
        return contextualized_states
    # fmt: on

    def forward(
        self,
        hidden_states,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
            return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)

        dtype = hidden_states.dtype
        if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
            # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
            hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)

        return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)


class Mamba2Block(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.residual_in_fp32 = config.residual_in_fp32
        self.norm = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.mixer = Mamba2Mixer(config, layer_idx=layer_idx)

    def forward(
        self,
        hidden_states,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        residual = hidden_states
        hidden_states = self.norm(hidden_states)
        if self.residual_in_fp32:
            residual = residual.to(torch.float32)

        hidden_states = self.mixer(
            hidden_states,
            cache_params=cache_params,
            cache_position=cache_position,
            attention_mask=attention_mask,
        )
        hidden_states = residual + hidden_states
        if self.residual_in_fp32:
            hidden_states = hidden_states.to(dtype=self.norm.weight.dtype)
        return hidden_states


class Mamba2PreTrainedModel(PreTrainedModel, GenerationMixin):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
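
    In particular, `_init_weights` re-initializes `A_log` following the S4D-real scheme and `dt_bias`
    with the inverse softplus of a time step sampled log-uniformly in `[time_step_min, time_step_max]`.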
""" config_class = Mamba2Config base_model_prefix = "backbone" _no_split_modules = ["Mamba2Block"] supports_gradient_checkpointing = True _is_stateful = True def _init_weights( self, module: nn.Module, num_residuals_per_layer: int = 1, ): """Initialize the weights.""" if isinstance(module, Mamba2Mixer): # --- A_log --- A = torch.arange(1, module.num_heads + 1) with torch.no_grad(): if not isinstance(module.A_log, torch.distributed.tensor.DTensor): module.A_log.copy_(torch.log(A)) else: logger.warning_once("`A_log` is a DTensor, skipping initialization") module.A_log._no_weight_decay = True # --- D --- nn.init.ones_(module.D) module.D._no_weight_decay = True # --- dt_bias --- dt = torch.exp( torch.rand(self.config.num_heads) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min) ).clamp(min=self.config.time_step_floor) # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 inv_dt = dt + torch.log(-torch.expm1(-dt)) with torch.no_grad(): if not isinstance(module.dt_bias, torch.distributed.tensor.DTensor): module.dt_bias.copy_(inv_dt) else: logger.warning_once("`dt_bias` is a DTensor, skipping initialization") module.dt_bias._no_reinit = True elif isinstance(module, (nn.Linear, nn.Conv1d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: nn.init.zeros_(module.bias) # guard against deprecated behavior if hasattr(module.bias, "_no_reinit"): raise ValueError("This is not supposed to happen") elif isinstance(module, nn.Embedding): nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) elif hasattr(module, 'reset_parameters'): module.reset_parameters() if self.config.rescale_prenorm_residual: # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py p = None if hasattr(module, 'o_proj'): # p = module.o_proj.weight # guard against deprecated behavior raise ValueError("This is not supposed to happen") elif hasattr(module, 'out_proj'): p = module.out_proj.weight elif hasattr(module, 'down_proj'): p = module.down_proj.weight if p is not None: # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) # We need to reinit p since this code could be called multiple times # Having just p *= scale would repeatedly scale it down nn.init.kaiming_uniform_(p, a=math.sqrt(5)) with torch.no_grad(): p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers) @dataclass # Copied from transformers.models.mamba.modeling_mamba.MambaOutput with MAMBA->MAMBA2,Mamba->Mamba2 class Mamba2Output(ModelOutput): """ Class for the MAMBA2 model outputs. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. cache_params (`Mamba2Cache`): The state of the model at the last time step. 
            Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`.

            Includes both the state space model state matrices after the selective scan, and the convolutional states.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    cache_params: Optional[Mamba2Cache] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
# Copied from transformers.models.mamba.modeling_mamba.MambaCausalLMOutput with Mamba->Mamba2
class Mamba2CausalLMOutput(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        cache_params (`Mamba2Cache`):
            The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
            avoid providing the old `input_ids`.

            Includes both the state space model state matrices after the selective scan, and the convolutional states.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    cache_params: Optional[Mamba2Cache] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class Mamba2Model(Mamba2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([Mamba2Block(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])

        self.gradient_checkpointing = False
        self.norm_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        # Initialize weights and apply final processing
        self._register_load_state_dict_pre_hook(self.load_hook)
        self.post_init()

    def load_hook(self, state_dict, prefix, *args):
        for k in state_dict:
            if "embedding." in k:
                state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
                break

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.LongTensor] = None,
        cache_params: Optional[Mamba2Cache] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[Tuple, Mamba2Output]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):  # ^ is python for xor
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        if self.gradient_checkpointing and self.training and use_cache:
            use_cache = False

        if use_cache:
            if cache_params is None:
                cache_params = Mamba2Cache(
                    self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
                )
                cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
            elif cache_position is None:
                # `cache_position` is normally initialized by `model.generate`; when calling `forward`
                # manually with `use_cache=True` and an existing `cache_params`, the caller must pass it
                # explicitly, since there is no reliable way to infer the current cache position here.
                raise ValueError(
                    "You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed; "
                    "you don't have to pass a `cache_params` in the prefilling stage because in that case it will "
                    "be initialized for you automatically"
                )
        else:
            cache_params = None

        hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        for mixer_block in self.layers:
            if self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(
                    mixer_block.__call__,
                    hidden_states,
                    cache_params,
                    cache_position,
                    attention_mask,
                )
            else:
                hidden_states = mixer_block(
                    hidden_states,
                    cache_params=cache_params,
                    cache_position=cache_position,
                    attention_mask=attention_mask,
                )

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        hidden_states = self.norm_f(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)

        return Mamba2Output(
            last_hidden_state=hidden_states,
            cache_params=cache_params if use_cache else None,
            hidden_states=all_hidden_states,
        )


class Mamba2ForCausalLM(Mamba2PreTrainedModel):

    _tied_weights_keys = []

    def __init__(self, config):
        super().__init__(config)
        self.backbone = Mamba2Model(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def get_input_embeddings(self):
        return self.backbone.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.backbone.set_input_embeddings(new_embeddings)
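
    # Generation note: `prepare_inputs_for_generation` feeds only the most recent token once
    # `cache_position[0] > 0`, since the Mamba2 recurrence keeps all earlier context in
    # `cache_params` (conv and SSM states) instead of a growing key/value cache.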
    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids,
        inputs_embeds=None,
        use_cache=None,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        logits_to_keep: Optional[int] = None,
        **kwargs,
    ):
        if use_cache:
            # `cache_position` should have been initialized in `generate`
            if cache_position is None:
                raise ValueError(
                    "`cache_position` should not be None as it should have been initialized in "
                    "`model.generate`, you are responsible for passing in a valid `cache_position` if "
                    "you are calling `prepare_inputs_for_generation` directly with `use_cache=True`"
                )
            if cache_position[0] > 0:
                input_ids = input_ids[:, -1][..., None]

                if attention_mask is not None:
                    attention_mask = None
            else:
                # we initialize `cache_position` to the full size of `conv_states` at the prefill stage.
                # Padding is applied when the input is shorter and truncation when it is longer, so this is
                # equivalent to always matching the length of `cache_params.conv_states`, i.e. `config.conv_kernel`
                cache_position = torch.arange(0, self.config.conv_kernel, device=input_ids.device)

        if inputs_embeds is not None and cache_params is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'attention_mask': attention_mask,
            'cache_params': cache_params,
            'use_cache': use_cache,
            'cache_position': cache_position,
            'logits_to_keep': logits_to_keep
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_params: Optional[Mamba2Cache] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs,  # for now we need this for generation
    ) -> Union[Tuple, Mamba2CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.backbone(
            input_ids,
            cache_params=cache_params,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=use_cache,
            cache_position=cache_position,
            attention_mask=attention_mask,
        )
        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return Mamba2CausalLMOutput(
            loss=loss,
            logits=logits,
            cache_params=outputs.cache_params,
            hidden_states=outputs.hidden_states,
        )
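

# Usage sketch (illustrative only; values and device placement are assumptions, adjust to your setup):
#
#     config = Mamba2Config()                        # default configuration
#     model = Mamba2ForCausalLM(config).cuda()
#     input_ids = torch.randint(0, config.vocab_size, (1, 16), device="cuda")
#     out = model(input_ids, use_cache=True)
#     next_token_logits = out.logits[:, -1]          # logits for the next-token prediction
#     cache = out.cache_params                       # pass back in for incremental decoding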