|
""" |
|
Adapted from |
|
[MosaicML](https://github.com/mosaicml/examples.git) and
|
[minGPT](https://github.com/karpathy/minGPT.git) |
|
""" |
|
|
|
from __future__ import annotations |
|
|
|
import logging |
|
import math |
|
import sys |
|
from abc import abstractmethod |
|
from collections import defaultdict |
|
from functools import partial |
|
from typing import ( |
|
Callable, |
|
Dict, |
|
Iterable, |
|
List, |
|
NamedTuple, |
|
Optional, |
|
Sequence, |
|
Set, |
|
Tuple, |
|
cast, |
|
) |
|
|
|
import torch |
|
import torch.backends.cuda |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
from torch import einsum |
|
|
|
from .aliases import PathOrStr |
|
from .beam_search import BeamSearch, Constraint, FinalSequenceScorer, Sampler |
|
from .config import ( |
|
ActivationCheckpointingStrategy, |
|
ActivationType, |
|
BlockType, |
|
CheckpointType, |
|
FSDPWrapStrategy, |
|
InitFnType, |
|
LayerNormType, |
|
ModelConfig, |
|
ShardedCheckpointerType, |
|
TrainConfig, |
|
) |
|
from .exceptions import OLMoConfigurationError |
|
from .initialization import init_normal |
|
from .torch_util import ensure_finite_, get_cumulative_document_lengths |
|
|
|
if sys.version_info.minor > 8: |
|
from collections.abc import MutableMapping |
|
elif sys.version_info.minor == 8: |
|
from typing import MutableMapping |
|
else: |
|
    raise SystemExit("This module requires Python 3.8 or higher")
|
|
|
__all__ = [ |
|
"LayerNormBase", |
|
"LayerNorm", |
|
"RMSLayerNorm", |
|
"RotaryEmbedding", |
|
"Activation", |
|
"GELU", |
|
"ReLU", |
|
"SwiGLU", |
|
"OLMoBlock", |
|
"OLMoSequentialBlock", |
|
"OLMo", |
|
"OLMoOutput", |
|
"OLMoGenerateOutput", |
|
] |
|
|
|
log = logging.getLogger(__name__) |
|
|
|
class FANLayer(nn.Module): |
|
""" |
|
FANLayer: The layer used in FAN (https://arxiv.org/abs/2410.02675). |
|
|
|
Args: |
|
input_dim (int): The number of input features. |
|
output_dim (int): The number of output features. |
|
        p_ratio (float): The fraction of output dimensions allocated to each of the cosine and sine parts (default: 0.25).
|
activation (str or callable): The activation function to apply to the g component. If a string is passed, |
|
the corresponding activation from torch.nn.functional is used (default: 'gelu'). |
|
        use_p_bias (bool): If True, include a bias in the linear transformation that produces the p and g
            components (default: True). In our experiments, including the bias made almost no difference.
|
""" |
|
|
|
def __init__(self, input_dim, output_dim, p_ratio=0.25, activation='gelu', use_p_bias=True): |
|
        super().__init__()
|
|
|
|
|
assert 0 <= p_ratio <= 0.5, "p_ratio must be between 0 and 0.5" |
|
|
|
self.p_ratio = p_ratio |
|
p_output_dim = int(output_dim * self.p_ratio) |
|
g_output_dim = output_dim - p_output_dim * 2 |
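        # Worked example (illustrative): with output_dim=8 and p_ratio=0.25,
        # p_output_dim = 2 and g_output_dim = 4, so concatenating cos(p), sin(p),
        # and activation(g) in forward() yields 2 + 2 + 4 = 8 features again.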
|
|
|
|
|
        self.input_linear = nn.Linear(input_dim, p_output_dim + g_output_dim, bias=use_p_bias)
|
|
|
self.fused_dims = (p_output_dim, g_output_dim) |
|
|
|
|
|
if isinstance(activation, str): |
|
self.activation = getattr(F, activation) |
|
else: |
|
self.activation = activation if activation else lambda x: x |
|
|
|
def forward(self, src, norm_g=None): |
|
""" |
|
Args: |
|
            src (Tensor): Input tensor of shape (..., input_dim).
            norm_g: Unused; accepted for interface compatibility.
|
|
|
Returns: |
|
            Tensor: Output tensor of shape (..., output_dim), after applying the FAN layer.
|
""" |
|
pg = self.input_linear(src) |
|
|
|
p, g = pg.split(self.fused_dims, dim=-1) |
|
|
|
|
|
output = torch.cat((torch.cos(p), torch.sin(p), self.activation(g)), dim=-1) |
|
|
|
return output |
|
|
|
class FAN(nn.Module): |
|
def __init__(self, input_dim, output_dim, config, activation='gelu'): |
|
        super().__init__()
|
|
|
self.fanlayer = FANLayer(input_dim, input_dim, config.p_ratio, activation) |
|
self.linear = nn.Linear(input_dim, output_dim, bias=config.include_bias, device=config.init_device) |
|
|
|
def forward(self, src): |
|
return self.linear(self.fanlayer(src)) |
|
|
|
|
|
def activation_checkpoint_function(cfg: ModelConfig): |
|
preserve_rng_state = not ( |
|
(cfg.attention_dropout == 0.0) and (cfg.embedding_dropout == 0.0) and (cfg.residual_dropout == 0.0) |
|
) |
|
from torch.utils.checkpoint import checkpoint |
|
|
|
return partial( |
|
checkpoint, |
|
preserve_rng_state=preserve_rng_state, |
|
use_reentrant=False, |
|
) |
|
|
|
|
|
def should_checkpoint_block(strategy: Optional[ActivationCheckpointingStrategy], block_idx: int) -> bool: |
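    # Examples (illustrative): `one_in_two` checkpoints blocks 0, 2, 4, ...,
    # while `two_in_three` checkpoints every block whose index is *not* a
    # multiple of three, i.e. blocks 1, 2, 4, 5, ...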
|
if strategy is None: |
|
return False |
|
elif ( |
|
(strategy == ActivationCheckpointingStrategy.whole_layer) |
|
or (strategy == ActivationCheckpointingStrategy.one_in_two and block_idx % 2 == 0) |
|
or (strategy == ActivationCheckpointingStrategy.one_in_three and block_idx % 3 == 0) |
|
or (strategy == ActivationCheckpointingStrategy.one_in_four and block_idx % 4 == 0) |
|
or (strategy == ActivationCheckpointingStrategy.one_in_eight and block_idx % 8 == 0) |
|
or (strategy == ActivationCheckpointingStrategy.two_in_three and block_idx % 3 != 0) |
|
or (strategy == ActivationCheckpointingStrategy.three_in_four and block_idx % 4 != 0) |
|
): |
|
return True |
|
else: |
|
return False |
|
|
|
|
|
class BufferCache(dict, MutableMapping[str, torch.Tensor]): |
|
""" |
|
Cache for attention biases and other things that would normally be stored as buffers. |
|
We avoid using buffers because we've run into various issues doing so with FSDP. |
|
In general it appears the way FSDP handles buffers is not well-defined. |
|
It doesn't shard them but apparently it does synchronize them across processes, which we want to avoid |
|
since (A) it isn't necessary, and (B) we sometimes have `-inf` in these biases which might get turned into |
|
NaNs when they're synchronized due to casting or some other issue. |
|
""" |
|
|
|
|
|
def _non_meta_init_device(config: ModelConfig) -> torch.device: |
|
if config.init_device is not None and config.init_device != "meta": |
|
return torch.device(config.init_device) |
|
else: |
|
if torch.backends.mps.is_available(): |
|
return torch.device("mps") |
|
elif torch.cuda.is_available(): |
|
return torch.device("cuda") |
|
else: |
|
return torch.device("cpu") |
|
|
|
|
|
class Dropout(nn.Dropout): |
|
def forward(self, input: torch.Tensor) -> torch.Tensor: |
|
if self.p == 0.0: |
|
return input |
|
else: |
|
return F.dropout(input, self.p, self.training, self.inplace) |
|
|
|
|
|
class LayerNormBase(nn.Module): |
|
def __init__( |
|
self, |
|
config: ModelConfig, |
|
*, |
|
size: Optional[int] = None, |
|
elementwise_affine: Optional[bool] = True, |
|
): |
|
super().__init__() |
|
self.config = config |
|
self.eps = config.layer_norm_eps |
|
self.normalized_shape = (size or config.d_model,) |
|
if elementwise_affine or (elementwise_affine is None and self.config.layer_norm_with_affine): |
|
self.weight = nn.Parameter(torch.ones(self.normalized_shape, device=config.init_device)) |
|
use_bias = self.config.bias_for_layer_norm |
|
if use_bias is None: |
|
use_bias = self.config.include_bias |
|
if use_bias: |
|
self.bias = nn.Parameter(torch.zeros(self.normalized_shape, device=config.init_device)) |
|
else: |
|
self.register_parameter("bias", None) |
|
else: |
|
self.register_parameter("bias", None) |
|
self.register_parameter("weight", None) |
|
|
|
@abstractmethod |
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
raise NotImplementedError |
|
|
|
@classmethod |
|
def build(cls, config: ModelConfig, size: Optional[int] = None, **kwargs) -> LayerNormBase: |
|
if config.layer_norm_type == LayerNormType.default: |
|
return LayerNorm(config, size=size, low_precision=False, **kwargs) |
|
elif config.layer_norm_type == LayerNormType.low_precision: |
|
return LayerNorm(config, size=size, low_precision=True, **kwargs) |
|
elif config.layer_norm_type == LayerNormType.rms: |
|
return RMSLayerNorm(config, size=size, **kwargs) |
|
else: |
|
raise NotImplementedError(f"Unknown LayerNorm type: '{config.layer_norm_type}'") |
|
|
|
def _cast_if_autocast_enabled(self, tensor: torch.Tensor, dtype: Optional[torch.dtype] = None) -> torch.Tensor: |
|
|
|
|
|
|
|
if tensor.device.type == "cuda" and torch.is_autocast_enabled(): |
|
return tensor.to(dtype=dtype if dtype is not None else torch.get_autocast_gpu_dtype()) |
|
elif tensor.device.type == "cpu" and torch.is_autocast_cpu_enabled(): |
|
return tensor.to(dtype=dtype if dtype is not None else torch.get_autocast_cpu_dtype()) |
|
else: |
|
return tensor |
|
|
|
def reset_parameters(self): |
|
if self.weight is not None: |
|
torch.nn.init.ones_(self.weight) |
|
if self.bias is not None: |
|
torch.nn.init.zeros_(self.bias) |
|
|
|
|
|
class LayerNorm(LayerNormBase): |
|
""" |
|
The default :class:`LayerNorm` implementation which can optionally run in low precision. |
|
""" |
|
|
|
def __init__( |
|
self, |
|
config: ModelConfig, |
|
size: Optional[int] = None, |
|
low_precision: bool = False, |
|
elementwise_affine: Optional[bool] = None, |
|
): |
|
super().__init__(config, size=size, elementwise_affine=elementwise_affine) |
|
self.low_precision = low_precision |
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
if self.low_precision: |
|
module_device = x.device |
|
downcast_x = self._cast_if_autocast_enabled(x) |
|
downcast_weight = ( |
|
self._cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight |
|
) |
|
downcast_bias = self._cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias |
|
with torch.autocast(enabled=False, device_type=module_device.type): |
|
return F.layer_norm( |
|
downcast_x, self.normalized_shape, weight=downcast_weight, bias=downcast_bias, eps=self.eps |
|
) |
|
else: |
|
return F.layer_norm(x, self.normalized_shape, weight=self.weight, bias=self.bias, eps=self.eps) |
|
|
|
|
|
class RMSLayerNorm(LayerNormBase): |
|
""" |
|
    RMS layer norm, a simplified :class:`LayerNorm` implementation.
|
""" |
|
|
|
def __init__( |
|
self, |
|
config: ModelConfig, |
|
size: Optional[int] = None, |
|
elementwise_affine: Optional[bool] = None, |
|
): |
|
super().__init__(config, size=size, elementwise_affine=elementwise_affine) |
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
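        # RMSNorm: x / sqrt(mean(x^2) + eps), computed in float32 for numerical
        # stability, followed by the optional learned affine transform below.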
|
with torch.autocast(enabled=False, device_type=x.device.type): |
|
og_dtype = x.dtype |
|
x = x.to(torch.float32) |
|
variance = x.pow(2).mean(-1, keepdim=True) |
|
x = x * torch.rsqrt(variance + self.eps) |
|
x = x.to(og_dtype) |
|
|
|
if self.weight is not None: |
|
if self.bias is not None: |
|
return self.weight * x + self.bias |
|
else: |
|
return self.weight * x |
|
else: |
|
return x |
|
|
|
|
|
class RotaryEmbedding(nn.Module): |
|
""" |
|
[Rotary positional embeddings (RoPE)](https://arxiv.org/abs/2104.09864). |
|
""" |
|
|
|
def __init__(self, config: ModelConfig, cache: BufferCache): |
|
super().__init__() |
|
self.config = config |
|
self.__cache = cache |
|
|
|
self.get_rotary_embedding(config.max_sequence_length, _non_meta_init_device(config)) |
|
|
|
def get_rotary_embedding(self, seq_len: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]: |
|
if ( |
|
(pos_sin := self.__cache.get("rope_pos_sin")) is not None |
|
and (pos_cos := self.__cache.get("rope_pos_cos")) is not None |
|
and pos_sin.shape[-2] >= seq_len |
|
and pos_cos.shape[-2] >= seq_len |
|
): |
|
if pos_sin.device != device: |
|
pos_sin = pos_sin.to(device) |
|
self.__cache["rope_pos_sin"] = pos_sin |
|
if pos_cos.device != device: |
|
pos_cos = pos_cos.to(device) |
|
self.__cache["rope_pos_cos"] = pos_cos |
|
return pos_sin[:, :, :seq_len, :], pos_cos[:, :, :seq_len, :] |
|
|
|
with torch.autocast(device.type, enabled=False): |
|
dim = self.config.d_model // self.config.n_heads |
|
inv_freq = 1.0 / ( |
|
self.config.rope_theta ** (torch.arange(0, dim, 2, device=device, dtype=torch.float) / dim) |
|
) |
|
seq = torch.arange(seq_len, device=device, dtype=torch.float) |
|
freqs = einsum("i , j -> i j", seq, inv_freq) |
|
positions = torch.cat((freqs, freqs), dim=-1) |
|
pos_sin, pos_cos = positions.sin()[None, None, :, :], positions.cos()[None, None, :, :] |
|
self.__cache["rope_pos_sin"] = pos_sin |
|
self.__cache["rope_pos_cos"] = pos_cos |
|
return pos_sin, pos_cos |
|
|
|
def rotate_half(self, x: torch.Tensor) -> torch.Tensor: |
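        # Illustrative: for head size 4 with x = [x0, x1, x2, x3] along the last
        # dimension, this returns [-x2, -x3, x0, x1].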
|
B, nh, T, hs = x.size() |
|
x = x.view(B, nh, T, 2, hs // 2) |
|
x1, x2 = x.unbind(dim=-2) |
|
return torch.cat((-x2, x1), dim=-1) |
|
|
|
def apply_rotary_pos_emb(self, pos_sin: torch.Tensor, pos_cos: torch.Tensor, t: torch.Tensor) -> torch.Tensor: |
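        # Standard RoPE update: t_rotated = t * cos(theta) + rotate_half(t) * sin(theta).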
|
return ((t * pos_cos) + (self.rotate_half(t) * pos_sin)).to(t.dtype) |
|
|
|
def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: |
|
if self.config.rope_full_precision: |
|
q_, k_ = q.float(), k.float() |
|
else: |
|
q_, k_ = q, k |
|
|
|
with torch.autocast(q.device.type, enabled=False): |
|
query_len, key_len = q_.shape[-2], k_.shape[-2] |
|
pos_sin, pos_cos = self.get_rotary_embedding(key_len, q_.device) |
|
pos_sin = pos_sin.type_as(q_) |
|
pos_cos = pos_cos.type_as(q_) |
|
q_ = self.apply_rotary_pos_emb( |
|
pos_sin[:, :, key_len - query_len : key_len, :], |
|
pos_cos[:, :, key_len - query_len : key_len, :], |
|
q_, |
|
) |
|
k_ = self.apply_rotary_pos_emb(pos_sin, pos_cos, k_) |
|
return q_.type_as(q), k_.type_as(k) |
|
|
|
|
|
class Activation(nn.Module): |
|
def __init__(self, config: ModelConfig): |
|
super().__init__() |
|
self.config = config |
|
|
|
@abstractmethod |
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
raise NotImplementedError |
|
|
|
@property |
|
@abstractmethod |
|
def output_multiplier(self) -> float: |
|
raise NotImplementedError |
|
|
|
@classmethod |
|
def build(cls, config: ModelConfig) -> Activation: |
|
if config.activation_type == ActivationType.gelu: |
|
return cast(Activation, GELU(approximate="none")) |
|
elif config.activation_type == ActivationType.relu: |
|
return cast(Activation, ReLU(inplace=False)) |
|
elif config.activation_type == ActivationType.swiglu: |
|
return SwiGLU(config) |
|
else: |
|
raise NotImplementedError(f"Unknown activation: '{config.activation_type}'") |
|
|
|
|
|
class GELU(nn.GELU): |
|
@property |
|
def output_multiplier(self) -> float: |
|
return 1.0 |
|
|
|
|
|
class ReLU(nn.ReLU): |
|
@property |
|
def output_multiplier(self) -> float: |
|
return 1.0 |
|
|
|
|
|
class SwiGLU(Activation): |
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
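        # The fused projection is split in half along the last dimension; the second
        # half acts as the gate, so the output width is half the input width
        # (hence output_multiplier = 0.5 below).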
|
x, gate = x.chunk(2, dim=-1) |
|
return F.silu(gate) * x |
|
|
|
@property |
|
def output_multiplier(self) -> float: |
|
return 0.5 |
|
|
|
|
|
def causal_attention_bias(seq_len: int, device: torch.device) -> torch.FloatTensor: |
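    # Illustrative result for seq_len=3, before the final view to (1, 1, 3, 3):
    #   [[0, m, m],
    #    [0, 0, m],
    #    [0, 0, 0]]
    # where m = torch.finfo(torch.float).min stands in for -inf.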
|
att_bias = torch.triu( |
|
torch.ones(seq_len, seq_len, device=device, dtype=torch.float), |
|
diagonal=1, |
|
) |
|
att_bias.masked_fill_(att_bias == 1, torch.finfo(att_bias.dtype).min) |
|
return att_bias.view(1, 1, seq_len, seq_len) |
|
|
|
|
|
def get_causal_attention_bias(cache: BufferCache, seq_len: int, device: torch.device) -> torch.Tensor: |
|
if (causal_bias := cache.get("causal_attention_bias")) is not None and causal_bias.shape[-1] >= seq_len: |
|
if causal_bias.device != device: |
|
causal_bias = causal_bias.to(device) |
|
cache["causal_attention_bias"] = causal_bias |
|
return causal_bias |
|
with torch.autocast(device.type, enabled=False): |
|
causal_bias = causal_attention_bias(seq_len, device) |
|
cache["causal_attention_bias"] = causal_bias |
|
return causal_bias |
|
|
|
|
|
def alibi_attention_bias(seq_len: int, config: ModelConfig, device: torch.device) -> torch.FloatTensor: |
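    # ALiBi adds a per-head linear penalty proportional to relative distance; the
    # head slopes below follow 2**(-m_h) with m_h spaced by alibi_bias_max / n_heads,
    # so heads with larger indices apply a gentler distance penalty.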
|
alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.float, device=device).view(1, 1, 1, seq_len) |
|
|
|
|
|
alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.float, device=device).view(1, 1, seq_len, 1) |
|
alibi_bias.abs_().mul_(-1) |
|
|
|
|
|
m = torch.arange(1, config.n_heads + 1, dtype=torch.float, device=device) |
|
m.mul_(config.alibi_bias_max / config.n_heads) |
|
|
|
|
|
return alibi_bias * (1.0 / (2 ** m.view(1, config.n_heads, 1, 1))) |
|
|
|
|
|
class OLMoBlock(nn.Module): |
|
""" |
|
A base class for transformer block implementations. |
|
""" |
|
|
|
def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache): |
|
super().__init__() |
|
self.layer_id = layer_id |
|
self.config = config |
|
self.hidden_size = ( |
|
config.mlp_hidden_size if config.mlp_hidden_size is not None else config.mlp_ratio * config.d_model |
|
) |
|
self.__cache = cache |
|
assert config.d_model % config.n_heads == 0 |
|
|
|
self._activation_checkpoint_fn: Optional[Callable] = None |
|
|
|
|
|
self.dropout = Dropout(config.residual_dropout) |
|
|
|
|
|
self.k_norm: Optional[LayerNormBase] = None |
|
self.q_norm: Optional[LayerNormBase] = None |
|
if config.attention_layer_norm: |
|
assert config.effective_n_kv_heads is not None |
|
self.k_norm = LayerNormBase.build( |
|
config, |
|
size=(config.d_model // config.n_heads) * config.effective_n_kv_heads, |
|
elementwise_affine=config.attention_layer_norm_with_affine, |
|
) |
|
self.q_norm = LayerNormBase.build(config, elementwise_affine=config.attention_layer_norm_with_affine) |
|
|
|
|
|
if config.clip_qkv is not None: |
|
assert config.clip_qkv > 0 |
|
|
|
|
|
self.act = Activation.build(config) |
|
assert (self.act.output_multiplier * self.hidden_size) % 1 == 0 |
|
|
|
|
|
self.attn_out = nn.Linear( |
|
config.d_model, config.d_model, bias=config.include_bias, device=config.init_device |
|
) |
|
|
|
|
|
self.ff_out = nn.Linear( |
|
int(self.act.output_multiplier * self.hidden_size), |
|
config.d_model, |
|
bias=config.include_bias, |
|
device=config.init_device, |
|
) |
|
self.ff_out._is_residual = True |
|
|
|
|
|
if self.config.rope: |
|
self.rotary_emb = RotaryEmbedding(config, self.__cache) |
|
|
|
self.flash_attn_func = None |
|
self.flash_attn_varlen_func = None |
|
if config.flash_attention: |
|
try: |
|
from flash_attn import ( |
|
flash_attn_func, |
|
flash_attn_varlen_func, |
|
) |
|
|
|
self.flash_attn_func = flash_attn_func |
|
self.flash_attn_varlen_func = flash_attn_varlen_func |
|
except ModuleNotFoundError: |
|
pass |
|
|
|
def reset_parameters(self): |
|
if self.k_norm is not None: |
|
self.k_norm.reset_parameters() |
|
if self.q_norm is not None: |
|
self.q_norm.reset_parameters() |
|
|
|
if self.config.init_fn == InitFnType.normal: |
|
attn_out_std = ff_out_std = self.config.init_std |
|
cutoff_factor = self.config.init_cutoff_factor |
|
|
|
elif self.config.init_fn == InitFnType.mitchell: |
|
attn_out_std = 1 / (math.sqrt(2 * self.config.d_model * (self.layer_id + 1))) |
|
ff_out_std = 1 / (math.sqrt(2 * self.ff_out.in_features * (self.layer_id + 1))) |
|
cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
|
|
elif self.config.init_fn == InitFnType.full_megatron: |
|
attn_out_std = ff_out_std = self.config.init_std / math.sqrt(2.0 * self.config.n_layers) |
|
cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
|
|
else: |
|
raise NotImplementedError(self.config.init_fn) |
|
|
|
init_normal(self.attn_out, std=attn_out_std, init_cutoff_factor=cutoff_factor) |
|
init_normal(self.ff_out, std=ff_out_std, init_cutoff_factor=cutoff_factor) |
|
|
|
def set_activation_checkpointing( |
|
self, strategy: Optional[ActivationCheckpointingStrategy], checkpoint_func: Optional[Callable] = None |
|
): |
|
if strategy == ActivationCheckpointingStrategy.fine_grained: |
|
self._activation_checkpoint_fn = checkpoint_func or activation_checkpoint_function(self.config) |
|
else: |
|
self._activation_checkpoint_fn = None |
|
|
|
@classmethod |
|
def _cast_attn_bias(cls, bias: torch.Tensor, input_dtype: torch.dtype) -> torch.Tensor: |
|
target_dtype = input_dtype |
|
|
|
|
|
|
|
if bias.device.type == "cuda" and torch.is_autocast_enabled(): |
|
target_dtype = torch.get_autocast_gpu_dtype() |
|
elif bias.device.type == "cpu" and torch.is_autocast_cpu_enabled(): |
|
target_dtype = torch.get_autocast_cpu_dtype() |
|
elif bias.device.type == "mps": |
|
target_dtype = torch.get_autocast_dtype("mps") |
|
if bias.dtype != target_dtype: |
|
bias = bias.to(target_dtype) |
|
ensure_finite_(bias, check_neg_inf=True, check_pos_inf=False) |
|
return bias |
|
|
|
def _scaled_dot_product_attention( |
|
self, |
|
q: torch.Tensor, |
|
k: torch.Tensor, |
|
v: torch.Tensor, |
|
attn_mask: Optional[torch.Tensor] = None, |
|
dropout_p: float = 0.0, |
|
is_causal: bool = False, |
|
max_doc_len: Optional[int] = None, |
|
cu_doc_lens: Optional[torch.Tensor] = None, |
|
) -> torch.Tensor: |
|
""" |
|
Computes scaled dot product attention on query, key and value tensors, using an optional |
|
attention mask if passed, and applying dropout if a probability greater than 0.0 is specified. |
|
""" |
|
if max_doc_len is not None and cu_doc_lens is not None: |
|
assert self.flash_attn_varlen_func is not None, "flash-attn is required for document masking" |
|
assert attn_mask is None, "attn-mask is currently not supported with document masking" |
|
B, T, D = q.size(0), q.size(2), q.size(3) |
|
r = self.flash_attn_varlen_func( |
|
q.transpose(1, 2).view(B * T, -1, D), |
|
k.transpose(1, 2).view(B * T, -1, D), |
|
v.transpose(1, 2).view(B * T, -1, D), |
|
cu_doc_lens, |
|
cu_doc_lens, |
|
max_doc_len, |
|
max_doc_len, |
|
dropout_p=dropout_p, |
|
causal=is_causal, |
|
) |
|
return r.view(B, T, -1, D).transpose(1, 2) |
|
elif self.flash_attn_func is not None and attn_mask is None: |
|
r = self.flash_attn_func( |
|
q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), dropout_p=dropout_p, causal=is_causal |
|
) |
|
return r.transpose(1, 2) |
|
else: |
|
|
|
assert k.size(1) == v.size(1) |
|
num_kv_heads = k.size(1) |
|
num_q_heads = q.size(1) |
|
if num_q_heads != num_kv_heads: |
|
assert num_q_heads % num_kv_heads == 0 |
|
k = k.repeat_interleave(num_q_heads // num_kv_heads, dim=1, output_size=num_q_heads) |
|
v = v.repeat_interleave(num_q_heads // num_kv_heads, dim=1, output_size=num_q_heads) |
|
|
|
return F.scaled_dot_product_attention( |
|
q, |
|
k, |
|
v, |
|
attn_mask=attn_mask, |
|
dropout_p=dropout_p, |
|
is_causal=is_causal, |
|
) |
|
|
|
def attention( |
|
self, |
|
q: torch.Tensor, |
|
k: torch.Tensor, |
|
v: torch.Tensor, |
|
attention_bias: Optional[torch.Tensor] = None, |
|
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
|
use_cache: bool = False, |
|
max_doc_len: Optional[int] = None, |
|
cu_doc_lens: Optional[torch.Tensor] = None, |
|
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: |
|
B, T, C = q.size() |
|
dtype = k.dtype |
|
|
|
|
|
if self.q_norm is not None and self.k_norm is not None: |
|
q = self.q_norm(q).to(dtype=dtype) |
|
k = self.k_norm(k).to(dtype=dtype) |
|
|
|
|
|
|
|
q = q.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2) |
|
|
|
k = k.view(B, T, self.config.effective_n_kv_heads, C // self.config.n_heads).transpose(1, 2) |
|
|
|
v = v.view(B, T, self.config.effective_n_kv_heads, C // self.config.n_heads).transpose(1, 2) |
|
|
|
if layer_past is not None: |
|
past_key, past_value = layer_past |
|
k = torch.cat((past_key, k), dim=-2) |
|
v = torch.cat((past_value, v), dim=-2) |
|
|
|
present = (k, v) if use_cache else None |
|
query_len, key_len = q.shape[-2], k.shape[-2] |
|
|
|
if self.config.rope: |
|
|
|
q, k = self.rotary_emb(q, k) |
|
|
|
if attention_bias is not None: |
|
|
|
|
|
|
|
|
|
|
|
attention_bias = self._cast_attn_bias( |
|
attention_bias[:, :, key_len - query_len : key_len, :key_len], dtype |
|
) |
|
|
|
|
|
|
|
att = self._scaled_dot_product_attention( |
|
q, |
|
k, |
|
v, |
|
attn_mask=attention_bias, |
|
dropout_p=0.0 if not self.training else self.config.attention_dropout, |
|
is_causal=attention_bias is None, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
|
|
|
|
att = att.transpose(1, 2).contiguous().view(B, T, C) |
|
|
|
|
|
return self.attn_out(att), present |
|
|
|
@abstractmethod |
|
def forward( |
|
self, |
|
x: torch.Tensor, |
|
attention_bias: Optional[torch.FloatTensor] = None, |
|
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
|
use_cache: bool = False, |
|
max_doc_len: Optional[int] = None, |
|
cu_doc_lens: Optional[torch.Tensor] = None, |
|
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: |
|
raise NotImplementedError |
|
|
|
@classmethod |
|
def build(cls, layer_id: int, config: ModelConfig, cache: BufferCache) -> OLMoBlock: |
|
if config.block_type == BlockType.sequential: |
|
return OLMoSequentialBlock(layer_id, config, cache) |
|
elif config.block_type == BlockType.llama: |
|
return OLMoLlamaBlock(layer_id, config, cache) |
|
else: |
|
raise NotImplementedError(f"Unknown block type: '{config.block_type}'") |
|
|
|
|
|
class OLMoSequentialBlock(OLMoBlock): |
|
""" |
|
This is a typical transformer block where the output is computed as ``MLP(LN(x + Attention(LN(x))))`` |
|
(plus another skip connection). To compute it as ``LN(MLP(x + LN(Attention(x))))``, |
|
use the flag `norm_after`. |
|
""" |
|
|
|
def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache): |
|
super().__init__(layer_id, config, cache) |
|
|
|
self.use_ATF = config.use_ATF |
|
|
|
head_dim = config.d_model // config.n_heads |
|
self.fused_dims = ( |
|
config.d_model, |
|
config.effective_n_kv_heads * head_dim, |
|
config.effective_n_kv_heads * head_dim, |
|
) |
|
|
|
|
|
if self.use_ATF: |
|
self.att_proj = FAN(config.d_model, sum(self.fused_dims), config, activation=config.attention_activation) |
|
else: |
|
self.att_proj = nn.Linear( |
|
config.d_model, sum(self.fused_dims), bias=config.include_bias, device=config.init_device |
|
) |
|
|
|
|
|
self.ff_proj = nn.Linear( |
|
config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device |
|
) |
|
|
|
|
|
self.attn_norm = LayerNorm.build(config, size=config.d_model) |
|
self.ff_norm = LayerNorm.build(config, size=config.d_model) |
|
|
|
def reset_parameters(self): |
|
super().reset_parameters() |
|
self.attn_norm.reset_parameters() |
|
self.ff_norm.reset_parameters() |
|
|
|
|
|
if self.config.init_fn == InitFnType.normal: |
|
std = self.config.init_std |
|
cutoff_factor = self.config.init_cutoff_factor |
|
elif self.config.init_fn == InitFnType.mitchell: |
|
std = 1 / math.sqrt(self.config.d_model) |
|
cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
elif self.config.init_fn == InitFnType.full_megatron: |
|
std = self.config.init_std |
|
cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
else: |
|
raise NotImplementedError(self.config.init_fn) |
|
|
|
if self.use_ATF: |
|
init_normal(self.att_proj.fanlayer.input_linear, std, cutoff_factor) |
|
init_normal(self.att_proj.linear, std, cutoff_factor) |
|
else: |
|
init_normal(self.att_proj, std, cutoff_factor) |
|
|
|
init_normal(self.ff_proj, std, cutoff_factor) |
|
|
|
def forward( |
|
self, |
|
x: torch.Tensor, |
|
attention_bias: Optional[torch.Tensor] = None, |
|
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
|
use_cache: bool = False, |
|
max_doc_len: Optional[int] = None, |
|
cu_doc_lens: Optional[torch.Tensor] = None, |
|
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if not self.config.norm_after: |
|
if self._activation_checkpoint_fn is not None: |
|
h = self._activation_checkpoint_fn(self.attn_norm, x) |
|
else: |
|
h = self.attn_norm(x) |
|
else: |
|
h = x |
|
|
|
qkv = self.att_proj(h) |
|
|
|
if self.config.clip_qkv is not None: |
|
qkv.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) |
|
|
|
q, k, v = qkv.split(self.fused_dims, dim=-1) |
|
|
|
|
|
if self._activation_checkpoint_fn is not None: |
|
att, cache = self._activation_checkpoint_fn( |
|
self.attention, |
|
q, |
|
k, |
|
v, |
|
attention_bias, |
|
layer_past=layer_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
else: |
|
att, cache = self.attention( |
|
q, |
|
k, |
|
v, |
|
attention_bias, |
|
layer_past=layer_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
|
|
if self.config.norm_after: |
|
if self._activation_checkpoint_fn is not None: |
|
att = self._activation_checkpoint_fn(self.attn_norm, att) |
|
else: |
|
att = self.attn_norm(att) |
|
|
|
|
|
|
|
x = x + self.dropout(att) |
|
|
|
|
|
|
|
og_x = x |
|
|
|
if not self.config.norm_after: |
|
if self._activation_checkpoint_fn is not None: |
|
x = self._activation_checkpoint_fn(self.ff_norm, x) |
|
else: |
|
x = self.ff_norm(x) |
|
|
|
x = self.ff_proj(x) |
|
|
|
if self._activation_checkpoint_fn is not None: |
|
x = self._activation_checkpoint_fn(self.act, x) |
|
else: |
|
x = self.act(x) |
|
x = self.ff_out(x) |
|
|
|
if self.config.norm_after: |
|
if self._activation_checkpoint_fn is not None: |
|
x = self._activation_checkpoint_fn(self.ff_norm, x) |
|
else: |
|
x = self.ff_norm(x) |
|
|
|
x = self.dropout(x) |
|
x = og_x + x |
|
|
|
return x, cache |
|
|
|
|
|
class OLMoLlamaBlock(OLMoBlock): |
|
""" |
|
This is a transformer block where the output is computed as ``MLP(LN(x + Attention(LN(x))))`` |
|
(plus another skip connection). This block is similar to `OLMoSequentialBlock` |
|
but some operations have slightly different implementations to imitate the |
|
behavior of Llama. |
|
""" |
|
|
|
def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache): |
|
super().__init__(layer_id, config, cache) |
|
|
|
self.use_ATF = config.use_ATF |
|
self.attn_norm = LayerNorm.build(config) |
|
self.ff_norm = LayerNorm.build(config) |
|
self.__cache = cache |
|
|
|
|
|
if config.multi_query_attention: |
|
q_proj_out_dim = config.d_model |
|
k_proj_out_dim = config.d_model // config.n_heads |
|
v_proj_out_dim = config.d_model // config.n_heads |
|
else: |
|
q_proj_out_dim = config.d_model |
|
k_proj_out_dim = config.d_model |
|
v_proj_out_dim = config.d_model |
|
|
|
if self.use_ATF: |
|
self.q_proj = FAN(config.d_model, q_proj_out_dim, config) |
|
self.k_proj = FAN(config.d_model, k_proj_out_dim, config) |
|
self.v_proj = FAN(config.d_model, v_proj_out_dim, config) |
|
else: |
|
self.q_proj = nn.Linear( |
|
config.d_model, q_proj_out_dim, bias=config.include_bias, device=config.init_device |
|
) |
|
self.k_proj = nn.Linear( |
|
config.d_model, k_proj_out_dim, bias=config.include_bias, device=config.init_device |
|
) |
|
self.v_proj = nn.Linear( |
|
config.d_model, v_proj_out_dim, bias=config.include_bias, device=config.init_device |
|
) |
|
|
|
|
|
self.ff_proj = nn.Linear( |
|
config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device |
|
) |
|
|
|
def reset_parameters(self): |
|
super().reset_parameters() |
|
self.attn_norm.reset_parameters() |
|
self.ff_norm.reset_parameters() |
|
|
|
|
|
if self.config.init_fn == InitFnType.normal: |
|
std = self.config.init_std |
|
cutoff_factor = self.config.init_cutoff_factor |
|
elif self.config.init_fn == InitFnType.mitchell: |
|
std = 1 / math.sqrt(self.config.d_model) |
|
cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
elif self.config.init_fn == InitFnType.full_megatron: |
|
std = self.config.init_std |
|
cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
else: |
|
raise NotImplementedError(self.config.init_fn) |
|
|
|
init_normal(self.q_proj, std, cutoff_factor) |
|
init_normal(self.k_proj, std, cutoff_factor) |
|
init_normal(self.v_proj, std, cutoff_factor) |
|
init_normal(self.ff_proj, std, cutoff_factor) |
|
|
|
def _scaled_dot_product_attention( |
|
self, |
|
q: torch.Tensor, |
|
k: torch.Tensor, |
|
v: torch.Tensor, |
|
attn_mask: Optional[torch.Tensor] = None, |
|
dropout_p: float = 0.0, |
|
is_causal: bool = False, |
|
max_doc_len: Optional[int] = None, |
|
cu_doc_lens: Optional[torch.Tensor] = None, |
|
) -> torch.Tensor: |
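        # Naive attention: softmax(q @ k^T / sqrt(d) + bias) @ v, written out explicitly
        # (rather than using F.scaled_dot_product_attention) to imitate Llama's behavior.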
|
if max_doc_len is not None or cu_doc_lens is not None: |
|
raise NotImplementedError( |
|
f"attention document masking is not implemented for {self.__class__.__name__}" |
|
) |
|
|
|
attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.size(-1)) |
|
|
|
if is_causal: |
|
assert attn_mask is None |
|
|
|
query_len, key_len = q.shape[-2], k.shape[-2] |
|
attn_bias = get_causal_attention_bias(self.__cache, key_len, q.device)[:, :, :query_len, :key_len] |
|
elif attn_mask is not None: |
|
attn_bias = attn_mask.to(q.dtype) |
|
else: |
|
attn_bias = torch.zeros_like(attn_weights) |
|
|
|
attn_weights += attn_bias |
|
attn_weights = nn.functional.softmax(attn_weights, dim=-1).to(q.dtype) |
|
attn_weights = nn.functional.dropout(attn_weights, p=dropout_p) |
|
return torch.matmul(attn_weights, v) |
|
|
|
def forward( |
|
self, |
|
x: torch.Tensor, |
|
attention_bias: Optional[torch.Tensor] = None, |
|
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
|
use_cache: bool = False, |
|
max_doc_len: Optional[int] = None, |
|
cu_doc_lens: Optional[torch.Tensor] = None, |
|
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: |
|
|
|
|
|
|
|
|
|
|
|
x_normed = self.attn_norm(x) |
|
q = self.q_proj(x_normed) |
|
k = self.k_proj(x_normed) |
|
v = self.v_proj(x_normed) |
|
|
|
if self.config.clip_qkv is not None: |
|
q.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) |
|
k.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) |
|
v.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) |
|
|
|
|
|
att, cache = self.attention( |
|
q, |
|
k, |
|
v, |
|
attention_bias, |
|
layer_past=layer_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
|
|
|
|
|
|
x = x + self.dropout(att) |
|
|
|
|
|
|
|
og_x = x |
|
if self._activation_checkpoint_fn is not None: |
|
x = self._activation_checkpoint_fn(self.ff_norm, x) |
|
else: |
|
x = self.ff_norm(x) |
|
x = self.ff_proj(x) |
|
if self._activation_checkpoint_fn is not None: |
|
x = self._activation_checkpoint_fn(self.act, x) |
|
else: |
|
x = self.act(x) |
|
x = self.ff_out(x) |
|
x = self.dropout(x) |
|
x = og_x + x |
|
|
|
return x, cache |
|
|
|
|
|
class OLMoOutput(NamedTuple): |
|
logits: torch.FloatTensor |
|
""" |
|
    A tensor of shape `(batch_size, seq_len, vocab_size)` containing the logits for the
    next-token distribution, i.e. the unnormalized scores *before* the (log) softmax.
|
""" |
|
|
|
attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] |
|
""" |
|
Attention keys and values from each block. |
|
""" |
|
|
|
hidden_states: Optional[Tuple[torch.Tensor, ...]] |
|
""" |
|
Hidden states from each block. |
|
""" |
|
|
|
|
|
class OLMoGenerateOutput(NamedTuple): |
|
token_ids: torch.LongTensor |
|
""" |
|
The generated token IDs, a tensor of shape `(batch_size, beam_size, max_steps)`. |
|
These do *not* include the original input IDs. |
|
""" |
|
|
|
scores: torch.FloatTensor |
|
""" |
|
The scores of the generated sequences, a tensor of shape `(batch_size, beam_size)`. |
|
""" |
|
|
|
|
|
class OLMoBlockGroup(nn.ModuleList): |
|
def __init__(self, config: ModelConfig, layer_offset: int, modules: Optional[Iterable[nn.Module]] = None): |
|
super().__init__(modules) |
|
self.config = config |
|
self.layer_offset = layer_offset |
|
self.activation_checkpointing_strategy: Optional[ActivationCheckpointingStrategy] = None |
|
self._activation_checkpoint_fn = activation_checkpoint_function(self.config) |
|
|
|
def forward( |
|
self, |
|
x: torch.Tensor, |
|
attention_bias: Optional[torch.FloatTensor] = None, |
|
layers_past: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None, |
|
use_cache: bool = False, |
|
max_doc_len: Optional[int] = None, |
|
cu_doc_lens: Optional[torch.Tensor] = None, |
|
) -> Tuple[torch.Tensor, Optional[List[Tuple[torch.Tensor, torch.Tensor]]]]: |
|
attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None |
|
for block_idx, block in enumerate(self): |
|
layer_past = None if layers_past is None else layers_past[block_idx] |
|
block_idx += self.layer_offset |
|
if should_checkpoint_block(self.activation_checkpointing_strategy, block_idx): |
|
|
|
x, cache = self._activation_checkpoint_fn( |
|
block, |
|
x, |
|
attention_bias=attention_bias, |
|
layer_past=layer_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
else: |
|
|
|
x, cache = block( |
|
x, |
|
attention_bias=attention_bias, |
|
layer_past=layer_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
if attn_key_values is not None: |
|
assert cache is not None |
|
attn_key_values.append(cache) |
|
return x, attn_key_values |
|
|
|
def reset_parameters(self): |
|
for block in self: |
|
block.reset_parameters() |
|
|
|
def set_activation_checkpointing( |
|
self, strategy: Optional[ActivationCheckpointingStrategy], checkpoint_func: Optional[Callable] = None |
|
): |
|
self.activation_checkpointing_strategy = strategy |
|
for block in self: |
|
block.set_activation_checkpointing(strategy, checkpoint_func=checkpoint_func) |
|
|
|
|
|
class OLMo(nn.Module): |
|
def __init__(self, config: ModelConfig, init_params: bool = True): |
|
super().__init__() |
|
self.config = config |
|
self.__cache = BufferCache() |
|
|
|
|
|
if self.config.alibi and self.config.flash_attention: |
|
raise OLMoConfigurationError("ALiBi is currently not supported with FlashAttention") |
|
|
|
if self.config.alibi and self.config.rope: |
|
raise OLMoConfigurationError("ALiBi and RoPE are mutually exclusive") |
|
|
|
if self.config.embedding_size is not None and self.config.embedding_size != self.config.vocab_size: |
|
if self.config.embedding_size < self.config.vocab_size: |
|
raise OLMoConfigurationError("embedding size should be at least as big as vocab size") |
|
elif self.config.embedding_size % 128 != 0: |
|
import warnings |
|
|
|
warnings.warn( |
|
"Embedding size is not a multiple of 128! This could hurt throughput performance.", UserWarning |
|
) |
|
|
|
self.activation_checkpointing_strategy: Optional[ActivationCheckpointingStrategy] = None |
|
self._activation_checkpoint_fn: Callable = activation_checkpoint_function(self.config) |
|
|
|
if not ( |
|
0 < self.config.block_group_size <= self.config.n_layers |
|
and self.config.n_layers % self.config.block_group_size == 0 |
|
): |
|
raise OLMoConfigurationError("n layers must be divisible by block group size") |
|
|
|
torch.backends.cuda.enable_flash_sdp(True) |
|
torch.backends.cuda.enable_mem_efficient_sdp(False) |
|
|
|
self.transformer = nn.ModuleDict( |
|
dict( |
|
wte=nn.Embedding( |
|
config.embedding_size or config.vocab_size, config.d_model, device=config.init_device |
|
), |
|
emb_drop=Dropout(config.embedding_dropout), |
|
ln_f=LayerNorm.build(config), |
|
) |
|
) |
|
|
|
blocks = [OLMoBlock.build(i, config, self.__cache) for i in range(config.n_layers)] |
|
if self.config.block_group_size > 1: |
|
block_groups = [ |
|
OLMoBlockGroup(config, i, blocks[i : i + config.block_group_size]) |
|
for i in range(0, config.n_layers, config.block_group_size) |
|
] |
|
self.transformer.update({"block_groups": nn.ModuleList(block_groups)}) |
|
else: |
|
self.transformer.update({"blocks": nn.ModuleList(blocks)}) |
|
|
|
if not (self.config.alibi or self.config.rope): |
|
self.transformer.update( |
|
{"wpe": nn.Embedding(config.max_sequence_length, config.d_model, device=config.init_device)} |
|
) |
|
if not config.weight_tying: |
|
self.transformer.update( |
|
{ |
|
"ff_out": nn.Linear( |
|
config.d_model, |
|
config.embedding_size or config.vocab_size, |
|
bias=config.include_bias, |
|
device=config.init_device, |
|
) |
|
} |
|
) |
|
if config.embedding_layer_norm: |
|
self.transformer.update({"emb_norm": LayerNorm.build(config)}) |
|
|
|
|
|
if init_params and self.config.init_device != "meta": |
|
self.reset_parameters() |
|
self.__num_fwd_flops: Optional[int] = None |
|
self.__num_bck_flops: Optional[int] = None |
|
|
|
|
|
if self.config.alibi: |
|
get_causal_attention_bias(self.__cache, config.max_sequence_length, _non_meta_init_device(config)) |
|
self.get_alibi_attention_bias(config.max_sequence_length, _non_meta_init_device(config)) |
|
|
|
def set_activation_checkpointing( |
|
self, strategy: Optional[ActivationCheckpointingStrategy], checkpoint_func: Optional[Callable] = None |
|
): |
|
self.activation_checkpointing_strategy = strategy |
|
if self.config.block_group_size != 1: |
|
for block_group in self.transformer.block_groups: |
|
block_group.set_activation_checkpointing(strategy, checkpoint_func=checkpoint_func) |
|
else: |
|
for block in self.transformer.blocks: |
|
block.set_activation_checkpointing(strategy, checkpoint_func=checkpoint_func) |
|
|
|
@property |
|
def device(self) -> torch.device: |
|
device: torch.device = self.transformer.wte.weight.device |
|
if device.type == "meta": |
|
return _non_meta_init_device(self.config) |
|
else: |
|
return device |
|
|
|
def reset_parameters(self): |
|
log.info("Initializing model parameters...") |
|
|
|
|
|
if self.config.init_fn == InitFnType.normal: |
|
|
|
|
|
|
|
wte_std = self.config.emb_init_std or self.config.init_std |
|
wte_cutoff_factor = self.config.init_cutoff_factor |
|
elif self.config.init_fn == InitFnType.mitchell: |
|
wte_std = self.config.emb_init_std or 1.0 / math.sqrt(self.config.d_model) |
|
wte_cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
elif self.config.init_fn == InitFnType.full_megatron: |
|
wte_std = self.config.init_std |
|
if self.config.emb_init_std is not None: |
|
wte_std = self.config.emb_init_std |
|
elif self.config.scale_emb_init: |
|
wte_std *= math.sqrt(self.config.d_model) |
|
wte_cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
else: |
|
raise NotImplementedError(self.config.init_fn) |
|
|
|
init_normal(self.transformer.wte, std=wte_std, init_cutoff_factor=wte_cutoff_factor) |
|
|
|
if hasattr(self.transformer, "wpe"): |
|
if self.config.init_fn == InitFnType.normal: |
|
wpe_std = self.config.init_std |
|
wpe_cutoff_factor = self.config.init_cutoff_factor |
|
elif self.config.init_fn == InitFnType.mitchell: |
|
wpe_std = 1 / math.sqrt(self.config.d_model) |
|
wpe_cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
elif self.config.init_fn == InitFnType.full_megatron: |
|
wpe_std = self.config.init_std |
|
wpe_cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
else: |
|
raise NotImplementedError(self.config.init_fn) |
|
|
|
init_normal(self.transformer.wpe, std=wpe_std, init_cutoff_factor=wpe_cutoff_factor) |
|
|
|
|
|
self.transformer.ln_f.reset_parameters() |
|
|
|
|
|
if hasattr(self.transformer, "ff_out"): |
|
if self.config.init_fn == InitFnType.normal: |
|
ff_out_std = self.config.init_std |
|
ff_out_cutoff_factor = self.config.init_cutoff_factor |
|
elif self.config.init_fn == InitFnType.mitchell: |
|
ff_out_std = 1 / math.sqrt(self.config.d_model) |
|
ff_out_cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
elif self.config.init_fn == InitFnType.full_megatron: |
|
ff_out_std = 1 / math.sqrt(self.config.d_model) |
|
ff_out_cutoff_factor = self.config.init_cutoff_factor or 3.0 |
|
else: |
|
raise NotImplementedError(self.config.init_fn) |
|
|
|
init_normal(self.transformer.ff_out, ff_out_std, ff_out_cutoff_factor) |
|
|
|
|
|
if self.config.block_group_size == 1: |
|
for block in self.transformer.blocks: |
|
block.reset_parameters() |
|
else: |
|
for block_group in self.transformer.block_groups: |
|
block_group.reset_parameters() |
|
|
|
def get_alibi_attention_bias(self, seq_len: int, device: torch.device) -> torch.Tensor: |
|
if (alibi_bias := self.__cache.get("alibi_attention_bias")) is not None and alibi_bias.shape[ |
|
-1 |
|
] >= seq_len: |
|
if alibi_bias.device != device: |
|
alibi_bias = alibi_bias.to(device) |
|
self.__cache["alibi_attention_bias"] = alibi_bias |
|
return alibi_bias |
|
with torch.autocast(device.type, enabled=False): |
|
alibi_bias = alibi_attention_bias(seq_len, self.config, device) |
|
self.__cache["alibi_attention_bias"] = alibi_bias |
|
return alibi_bias |
|
|
|
def forward( |
|
self, |
|
input_ids: torch.LongTensor, |
|
input_embeddings: Optional[torch.FloatTensor] = None, |
|
attention_mask: Optional[torch.Tensor] = None, |
|
attention_bias: Optional[torch.Tensor] = None, |
|
past_key_values: Optional[Sequence[Tuple[torch.Tensor, torch.Tensor]]] = None, |
|
use_cache: bool = False, |
|
last_logits_only: bool = False, |
|
output_hidden_states: Optional[bool] = None, |
|
doc_lens: Optional[torch.Tensor] = None, |
|
max_doc_lens: Optional[Sequence[int]] = None, |
|
) -> OLMoOutput: |
|
""" |
|
:param input_ids: A tensor of shape `(batch_size, seq_len)`. |
|
:param input_embeddings: A tensor of shape `(batch_size, seq_len, d_model)` with input |
|
embeddings. When provided, it is treated as the output of the input embedding layer. |
|
:param attention_mask: A tensor of shape `(batch_size, seq_len)` that indicates |
|
which input IDs are masked. A `1` value in the mask means that |
|
the corresponding input ID should *not* be ignored. A `0` means |
|
that the corresponding input ID is masked. |
|
|
|
This has the same meaning as the `attention_mask` in HuggingFace's `transformers` |
|
library. |
|
:param attention_bias: A tensor of shape `(batch_size, 1, seq_len, seq_len)`, |
|
`(1, 1, seq_len, seq_len)`, or `(seq_len, seq_len)`. This is used |
|
to introduce causal or other biases. |
|
|
|
If the tensor is a bool or byte tensor, a `True` or `1` at `attention_bias[:, :, i, j]` |
|
indicates that the i-th element in the sequence is allowed to attend to the j-th |
|
element in the sequence. |
|
|
|
If the tensor is a float tensor, it will just be added to the attention |
|
scores before the softmax. |
|
|
|
The default is causal, which corresponds to a lower-diagonal byte matrix of ones. |
|
:param past_key_values: Pre-computed keys and values for each attention block. |
|
            Can be used to speed up sequential decoding. Input IDs whose past key/values are
            given here should not be passed again via `input_ids`, since they have already been processed.
|
:param use_cache: If `True`, return key and value tensors for each block. |
|
:param last_logits_only: If `True`, only compute the logits for the last token of each sequence. |
|
This can speed up decoding when you only care about the next token. |
|
:param doc_lens: Document lengths to use in attention for intra-document masking. |
|
Shape `(batch_size, max_docs)`. |
|
:param max_doc_lens: Maximum document length for each instance in the batch. |
|
""" |
|
output_hidden_states = output_hidden_states if output_hidden_states is not None else False |
|
|
|
if past_key_values: |
|
assert len(past_key_values) == self.config.n_layers |
|
|
|
batch_size, seq_len = input_ids.size() if input_embeddings is None else input_embeddings.size()[:2] |
|
if past_key_values is None: |
|
past_length = 0 |
|
else: |
|
past_length = past_key_values[0][0].size(-2) |
|
|
|
max_doc_len: Optional[int] = None |
|
cu_doc_lens: Optional[torch.Tensor] = None |
|
if doc_lens is not None and max_doc_lens is not None: |
|
max_doc_len = max(max_doc_lens) |
|
cu_doc_lens = get_cumulative_document_lengths(doc_lens) |
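            # Illustrative (sketch): for doc_lens like [[3, 2], [5, 0]] this should produce
            # flattened cumulative boundaries roughly of the form [0, 3, 5, 10] (zero-length
            # entries dropped), which the flash-attn varlen kernel uses to keep attention
            # within each document.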
|
|
|
|
|
|
|
x = self.transformer.wte(input_ids) if input_embeddings is None else input_embeddings |
|
|
|
|
|
if self.config.embedding_layer_norm: |
|
x = self.transformer.emb_norm(x) |
|
|
|
if not (self.config.alibi or self.config.rope): |
|
|
|
|
|
pos = torch.arange(past_length, past_length + seq_len, dtype=torch.long, device=x.device).unsqueeze(0) |
|
|
|
pos_emb = self.transformer.wpe(pos) |
|
x = pos_emb + x |
|
|
|
|
|
|
|
x = self.transformer.emb_drop(x) |
|
|
|
|
|
if attention_mask is not None: |
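            # Convert the HuggingFace-style 0/1 mask into an additive bias: masked
            # positions get a large negative value so the softmax effectively ignores them.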
|
|
|
attention_mask = attention_mask.to(dtype=torch.float).view(batch_size, -1)[:, None, None, :] |
|
attention_mask = (1.0 - attention_mask) * torch.finfo(attention_mask.dtype).min |
|
|
|
|
|
if ( |
|
attention_bias is not None |
|
or attention_mask is not None |
|
or self.config.alibi |
|
|
|
|
|
|
|
or past_key_values is not None |
|
): |
|
if attention_bias is None and self.config.alibi: |
|
attention_bias = get_causal_attention_bias( |
|
self.__cache, past_length + seq_len, x.device |
|
) + self.get_alibi_attention_bias(past_length + seq_len, x.device) |
|
elif attention_bias is None: |
|
attention_bias = get_causal_attention_bias(self.__cache, past_length + seq_len, x.device) |
|
elif attention_bias.dtype in (torch.int8, torch.bool): |
|
attention_bias = attention_bias.to(dtype=torch.float) |
|
attention_bias.masked_fill_(attention_bias == 0.0, torch.finfo(attention_bias.dtype).min) |
|
|
|
|
|
mask_len = seq_len |
|
if attention_mask is not None: |
|
mask_len = attention_mask.shape[-1] |
|
elif past_key_values is not None: |
|
mask_len = past_key_values[0][0].shape[-2] + seq_len |
|
attention_bias = attention_bias[:, :, :mask_len, :mask_len].to(dtype=torch.float) |
|
|
|
|
|
if attention_mask is not None: |
|
attention_bias = attention_bias + attention_mask |
|
|
|
|
|
|
|
ensure_finite_(attention_bias, check_neg_inf=True, check_pos_inf=False) |
|
|
|
attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None |
|
|
|
|
|
all_hidden_states = [] |
|
|
|
|
|
if self.config.block_group_size == 1: |
|
for block_idx, block in enumerate(self.transformer.blocks): |
|
if output_hidden_states: |
|
|
|
all_hidden_states.append(x) |
|
|
|
layer_past = None if past_key_values is None else past_key_values[block_idx] |
|
if should_checkpoint_block(self.activation_checkpointing_strategy, block_idx): |
|
|
|
x, cache = self._activation_checkpoint_fn( |
|
block, |
|
x, |
|
attention_bias=attention_bias, |
|
layer_past=layer_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
else: |
|
|
|
x, cache = block( |
|
x, |
|
attention_bias=attention_bias, |
|
layer_past=layer_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
|
|
if attn_key_values is not None: |
|
assert cache is not None |
|
attn_key_values.append(cache) |
|
else: |
|
for group_idx, block_group in enumerate(self.transformer.block_groups): |
|
if output_hidden_states: |
|
|
|
all_hidden_states.append(x) |
|
|
|
layers_past = ( |
|
None |
|
if past_key_values is None |
|
else past_key_values[ |
|
group_idx * self.config.block_group_size : (group_idx + 1) * self.config.block_group_size |
|
] |
|
) |
|
x, cache = block_group( |
|
x, |
|
attention_bias=attention_bias, |
|
layers_past=layers_past, |
|
use_cache=use_cache, |
|
max_doc_len=max_doc_len, |
|
cu_doc_lens=cu_doc_lens, |
|
) |
|
if attn_key_values is not None: |
|
assert cache is not None |
|
attn_key_values.extend(cache) |
|
|
|
if last_logits_only: |
|
|
|
x = x[:, -1, :].unsqueeze(1) |
|
|
|
|
|
|
|
x = self.transformer.ln_f(x) |
|
if output_hidden_states: |
|
|
|
all_hidden_states.append(x) |
|
|
|
|
|
|
|
if self.config.weight_tying: |
|
logits = F.linear(x, self.transformer.wte.weight, None) |
|
else: |
|
logits = self.transformer.ff_out(x) |
|
if self.config.scale_logits: |
|
logits.mul_(1 / math.sqrt(self.config.d_model)) |
|
|
|
return OLMoOutput( |
|
logits=logits, |
|
attn_key_values=attn_key_values, |
|
hidden_states=tuple(all_hidden_states) if output_hidden_states else None, |
|
) |
|
|
|
def get_fsdp_wrap_policy(self, wrap_strategy: Optional[FSDPWrapStrategy] = None): |
|
if wrap_strategy is None: |
|
return None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
size_based_module_to_wrap = {self.transformer.wte} |
|
if hasattr(self.transformer, "ff_out"): |
|
size_based_module_to_wrap.add(self.transformer.ff_out) |
|
|
|
if wrap_strategy == FSDPWrapStrategy.by_block: |
|
|
|
def fsdp_wrap_fn(module, recurse: bool = True, nonwrapped_numel: int = 0): |
|
del nonwrapped_numel |
|
wrap = isinstance(module, OLMoBlock) |
|
if recurse: |
|
return True |
|
else: |
|
return wrap |
|
|
|
return fsdp_wrap_fn |
|
elif wrap_strategy == FSDPWrapStrategy.by_block_and_size: |
|
|
|
def fsdp_wrap_fn(module, recurse: bool = True, nonwrapped_numel: int = 0): |
|
del nonwrapped_numel |
|
wrap = isinstance(module, (OLMoBlock,)) or module in size_based_module_to_wrap |
|
if recurse: |
|
return True |
|
else: |
|
return wrap |
|
|
|
return fsdp_wrap_fn |
|
elif wrap_strategy == FSDPWrapStrategy.by_block_group: |
|
if self.config.block_group_size <= 1: |
|
raise OLMoConfigurationError( |
|
"'by_block_group' FSDP wrapping strategy requires block group size greater than 1" |
|
) |
|
|
|
def fsdp_wrap_fn(module, recurse: bool = True, nonwrapped_numel: int = 0): |
|
del nonwrapped_numel |
|
wrap = isinstance(module, OLMoBlockGroup) |
|
if recurse: |
|
return True |
|
else: |
|
return wrap |
|
|
|
return fsdp_wrap_fn |
|
elif wrap_strategy == FSDPWrapStrategy.by_block_group_and_size: |
|
if self.config.block_group_size <= 1: |
|
raise OLMoConfigurationError( |
|
"'by_block_group_and_size' FSDP wrapping strategy requires block group size greater than 1" |
|
) |
|
|
|
def fsdp_wrap_fn(module, recurse: bool = True, nonwrapped_numel: int = 0): |
|
del nonwrapped_numel |
|
wrap = isinstance(module, (OLMoBlockGroup,)) or module in size_based_module_to_wrap |
|
if recurse: |
|
return True |
|
else: |
|
return wrap |
|
|
|
return fsdp_wrap_fn |
|
elif wrap_strategy == FSDPWrapStrategy.size_based: |
|
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy |
|
|
|
return size_based_auto_wrap_policy |
|
elif wrap_strategy in { |
|
FSDPWrapStrategy.one_in_two, |
|
FSDPWrapStrategy.one_in_three, |
|
FSDPWrapStrategy.one_in_four, |
|
FSDPWrapStrategy.one_in_five, |
|
}: |
|
c = { |
|
FSDPWrapStrategy.one_in_two: 2, |
|
FSDPWrapStrategy.one_in_three: 3, |
|
FSDPWrapStrategy.one_in_four: 4, |
|
FSDPWrapStrategy.one_in_five: 5, |
|
}[wrap_strategy] |
|
|
|
def fsdp_wrap_fn(module, recurse: bool = True, nonwrapped_numel: int = 0): |
|
del nonwrapped_numel |
|
wrap = isinstance(module, OLMoBlock) and module.layer_id % c == 0 |
|
if recurse: |
|
return True |
|
else: |
|
return wrap |
|
|
|
return fsdp_wrap_fn |
|
else: |
|
raise NotImplementedError(wrap_strategy) |
|
|
|
def num_params(self, include_embedding: bool = True) -> int: |
|
""" |
|
Get the total number of parameters. |
|
""" |
|
params = (np for np in self.named_parameters()) |
|
if not include_embedding: |
|
params = filter( |
|
lambda np: ".wte." not in np[0] and ".wpe." not in np[0], |
|
params, |
|
) |
|
return sum(p.numel() for _, p in params) |
|
|
|
@property |
|
def num_fwd_flops(self): |
|
if self.__num_fwd_flops: |
|
return self.__num_fwd_flops |
|
|
|
|
|
n_params = self.num_params(include_embedding=False) |
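        # Rough estimate: each non-embedding parameter costs one multiply-accumulate
        # (2 FLOPs) per token, plus the attention score/value matmuls at about
        # 2 * 2 * d_model * max_sequence_length FLOPs per token per layer.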
|
|
|
|
|
|
|
params_flops_per_token = 2 * n_params |
|
|
|
attn_flops_per_token = ( |
|
self.config.n_layers * 2 * 2 * (self.config.d_model * self.config.max_sequence_length) |
|
) |
|
self.__num_fwd_flops = params_flops_per_token + attn_flops_per_token |
|
return self.__num_fwd_flops |
|
|
|
@property |
|
def num_bck_flops(self): |
|
if self.__num_bck_flops: |
|
return self.__num_bck_flops |
|
|
|
n_params = self.num_params() |
|
params_flops_per_token = 4 * n_params |
|
attn_flops_per_token = self.config.n_layers * 8 * (self.config.d_model * self.config.max_sequence_length) |
|
self.__num_bck_flops = params_flops_per_token + attn_flops_per_token |
|
return self.__num_bck_flops |
|
|
|
def generate( |
|
self, |
|
input_ids: torch.LongTensor, |
|
attention_mask: Optional[torch.Tensor] = None, |
|
attention_bias: Optional[torch.Tensor] = None, |
|
max_steps: int = 10, |
|
beam_size: int = 1, |
|
per_node_beam_size: Optional[int] = None, |
|
sampler: Optional[Sampler] = None, |
|
min_steps: Optional[int] = None, |
|
final_sequence_scorer: Optional[FinalSequenceScorer] = None, |
|
constraints: Optional[List[Constraint]] = None, |
|
) -> OLMoGenerateOutput: |
|
""" |
|
Generate token IDs using beam search. |
|
|
|
Note that by default ``beam_size`` is set to 1, which is greedy decoding. |
|
|
|
:param input_ids: A tensor of shape `(batch_size, seq_len)`. |
|
        :param attention_mask: An optional tensor of shape `(batch_size, seq_len)`, the same
|
as for the forward method. |
|
:param attention_bias: A tensor of shape |
|
`(batch_size, 1, seq_len + tokens_to_generate, seq_len + tokens_to_generate)`, |
|
            the same as for the forward method, except that only this one shape is accepted here.
|
|
|
For an explanation of the other arguments, see :class:`BeamSearch`. |
|
""" |
|
beam_search = BeamSearch( |
|
self.config.eos_token_id, |
|
max_steps=max_steps, |
|
beam_size=beam_size, |
|
per_node_beam_size=per_node_beam_size, |
|
sampler=sampler, |
|
min_steps=min_steps, |
|
final_sequence_scorer=final_sequence_scorer, |
|
constraints=constraints, |
|
) |
|
|
|
|
|
batch_size, seq_len = input_ids.shape |
|
if attention_mask is not None: |
|
assert attention_mask.shape == (batch_size, seq_len) |
|
if attention_bias is not None: |
|
assert len(attention_bias.shape) == 4 |
|
assert attention_bias.shape[:2] == (batch_size, 1) |
|
assert ( |
|
seq_len + beam_search.max_steps |
|
<= attention_bias.shape[2] |
|
== attention_bias.shape[3] |
|
<= self.config.max_sequence_length |
|
) |
|
|
|
tokens_generated = 0 |
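# BeamSearch tracks decoding state as a flat dict of tensors, so the per-layer
# (key, value) cache tuples are flattened into "past_key_{i}" / "past_value_{i}"
# entries and rebuilt before each forward call.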
|
|
|
def flatten_past_key_values( |
|
past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], |
|
) -> Dict[str, torch.Tensor]: |
|
out = {} |
|
for i, (key, value) in enumerate(past_key_values): |
|
out[f"past_key_{i}"] = key |
|
out[f"past_value_{i}"] = value |
|
return out |
|
|
|
def unflatten_past_key_values( |
|
past_key_values: Dict[str, torch.Tensor], |
|
) -> List[Tuple[torch.Tensor, torch.Tensor]]: |
|
out = [] |
|
for i in range(self.config.n_layers): |
|
past_key = past_key_values[f"past_key_{i}"] |
|
past_value = past_key_values[f"past_value_{i}"] |
|
out.append((past_key, past_value)) |
|
return out |
|
|
|
def step( |
|
last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]

) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
|
nonlocal tokens_generated |
|
|
|
attention_mask = state.get("attention_mask") |
|
attention_bias = state.get("attention_bias") |
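# After the first step, feed only the newest token and reuse the cached
# key/value tensors from the previous call; the attention mask (if any)
# grows by one position per generated token.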
|
|
|
if tokens_generated > 0: |
|
past_key_values = unflatten_past_key_values(state) |
|
input_ids = last_predictions.unsqueeze(1) |
|
if attention_mask is not None: |
|
group_size = input_ids.shape[0] |
|
attention_mask = torch.cat((attention_mask, attention_mask.new_ones((group_size, 1))), dim=-1) |
|
else: |
|
past_key_values = None |
|
input_ids = state["input_ids"] |
|
|
|
tokens_generated += 1 |
|
|
|
|
|
output = self( |
|
input_ids, |
|
attention_mask=attention_mask, |
|
attention_bias=attention_bias, |
|
past_key_values=past_key_values, |
|
use_cache=True, |
|
last_logits_only=True, |
|
) |
|
log_probs = F.log_softmax(output.logits[:, -1, :], dim=-1) |
|
|
|
|
|
state = flatten_past_key_values(output.attn_key_values) |
|
if attention_mask is not None: |
|
state["attention_mask"] = attention_mask |
|
if attention_bias is not None: |
|
state["attention_bias"] = attention_bias |
|
|
|
return log_probs, state |
|
|
|
initial_preds = input_ids.new_zeros((batch_size,)) |
|
state: Dict[str, torch.Tensor] = {"input_ids": input_ids}
|
if attention_mask is not None: |
|
state["attention_mask"] = attention_mask |
|
if attention_bias is not None: |
|
state["attention_bias"] = attention_bias |
|
with torch.no_grad(): |
|
token_ids, scores = beam_search.search(initial_preds, state, step) |
|
|
|
return OLMoGenerateOutput( |
|
token_ids=token_ids, |
|
scores=scores, |
|
) |
|
|
|
@classmethod |
|
def from_checkpoint( |
|
cls, checkpoint_dir: PathOrStr, device: str = "cpu", checkpoint_type: Optional[CheckpointType] = None |
|
) -> OLMo: |
|
""" |
|
Load an OLMo model from a checkpoint. |
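
Example (illustrative path)::

    model = OLMo.from_checkpoint("checkpoints/step1000-unsharded", device="cuda")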
|
""" |
|
from .util import resource_path |
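# If no checkpoint type is given, guess it from the directory contents:
# an unsharded checkpoint stores its weights in a single "model.pt" file.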
|
|
|
|
|
if checkpoint_type is None: |
|
try: |
|
if resource_path(checkpoint_dir, "model.pt").is_file(): |
|
checkpoint_type = CheckpointType.unsharded |
|
else: |
|
checkpoint_type = CheckpointType.sharded |
|
except FileNotFoundError: |
|
checkpoint_type = CheckpointType.sharded |
|
|
|
|
|
config_path = resource_path(checkpoint_dir, "config.yaml") |
|
model_config = ModelConfig.load(config_path, key="model", validate_paths=False) |
|
|
|
if checkpoint_type == CheckpointType.unsharded: |
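# Initialize on CPU first so the full state dict can be loaded without
# exhausting device memory, then move the model to the target device.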
|
|
|
model_config.init_device = "cpu" |
|
model = OLMo(model_config) |
|
|
|
|
|
state_dict_path = resource_path(checkpoint_dir, "model.pt") |
|
state_dict = torch.load(state_dict_path, map_location="cpu") |
|
model.load_state_dict(model._make_state_dict_compatible(state_dict)[0]) |
|
model = model.to(torch.device(device)) |
|
else: |
|
train_config = TrainConfig.load(config_path) |
|
if train_config.sharded_checkpointer == ShardedCheckpointerType.olmo_core: |
|
from olmo_core.distributed.checkpoint import ( |
|
load_model_and_optim_state, |
|
) |
|
|
|
model_config.init_device = device |
|
model = OLMo(model_config) |
|
load_model_and_optim_state(checkpoint_dir, model) |
|
else: |
|
|
|
from .checkpoint import load_model_state |
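# For other sharded checkpoints, initialize directly on the target device
# and let the sharded checkpointer fill in the weights in place.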
|
|
|
|
|
|
|
model_config.init_device = device |
|
model = OLMo(model_config) |
|
|
|
|
|
load_model_state(checkpoint_dir, model) |
|
|
|
return model.eval() |
|
|
|
def _make_state_dict_compatible( |
|
self, state_dict: Dict[str, torch.Tensor] |
|
) -> Tuple[Dict[str, torch.Tensor], Dict[str, Set[str]]]: |
|
""" |
|
Handles some cases where the state dict is valid yet may need to be transformed in order to |
|
be loaded. |
|
|
|
This modifies the state dict in-place and also returns it, along with a mapping of original key |
|
names to new key names in cases where the keys were simply renamed. That mapping can be used |
|
to make a corresponding optimizer state dict compatible as well. |
|
""" |
|
import re |
|
from fnmatch import fnmatch |
|
|
|
new_keys_to_og_keys: Dict[str, str] = {} |
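# Strip any FSDP wrapper prefixes from the keys, remembering the original
# names so the caller can remap an optimizer state dict the same way.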
|
|
|
|
|
|
|
|
|
for key in list(state_dict.keys()): |
|
state_dict[(new_key := key.replace("_fsdp_wrapped_module.", ""))] = state_dict.pop(key) |
|
new_keys_to_og_keys[new_key] = key |
|
|
|
|
|
if self.config.block_type == BlockType.sequential: |
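# Older sequential-block checkpoints may have a single "norm" module per block;
# duplicate its weight (and bias) into the separate attn_norm and ff_norm
# parameters used by the current block layout.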
|
for key in list(state_dict.keys()): |
|
if fnmatch(key, "transformer.*.norm.weight"): |
|
tensor = state_dict.pop(key) |
|
state_dict[(new_key := key.replace("norm.weight", "attn_norm.weight"))] = tensor |
|
new_keys_to_og_keys[new_key] = new_keys_to_og_keys[key] |
|
state_dict[(new_key := key.replace("norm.weight", "ff_norm.weight"))] = tensor.clone() |
|
new_keys_to_og_keys[new_key] = new_keys_to_og_keys[key] |
|
del new_keys_to_og_keys[key] |
|
elif fnmatch(key, "transformer.*.norm.bias"): |
|
tensor = state_dict.pop(key) |
|
state_dict[(new_key := key.replace("norm.bias", "attn_norm.bias"))] = tensor |
|
new_keys_to_og_keys[new_key] = new_keys_to_og_keys[key] |
|
state_dict[(new_key := key.replace("norm.bias", "ff_norm.bias"))] = tensor.clone() |
|
new_keys_to_og_keys[new_key] = new_keys_to_og_keys[key] |
|
del new_keys_to_og_keys[key] |
|
|
|
|
|
if "transformer.block_groups.0.0.attn_out.weight" in state_dict.keys(): |
|
state_dict_block_group_size = len( |
|
[k for k in state_dict.keys() if fnmatch(k, "transformer.block_groups.0.*.attn_out.weight")] |
|
) |
|
else: |
|
state_dict_block_group_size = 1 |
|
if self.config.block_group_size != state_dict_block_group_size: |
|
log.info( |
|
f"Regrouping state dict blocks from group size {state_dict_block_group_size} to " |
|
f"group size {self.config.block_group_size}" |
|
) |
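# First flatten any block grouping present in the checkpoint into plain block
# indices (block_idx = group_idx * group_size + group_block_idx), then, if this
# model groups blocks, regroup using the inverse mapping. E.g. with a checkpoint
# group size of 2, "block_groups.1.0.*" maps to "blocks.2.*".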
|
|
|
|
|
if state_dict_block_group_size > 1: |
|
for key in list(state_dict.keys()): |
|
if (m := re.match(r"transformer.block_groups\.(\d+)\.(\d+)\..*", key)) is not None: |
|
group_idx, group_block_idx = int(m.group(1)), int(m.group(2)) |
|
block_idx = (group_idx * state_dict_block_group_size) + group_block_idx |
|
state_dict[ |
|
( |
|
new_key := key.replace( |
|
f"block_groups.{group_idx}.{group_block_idx}.", f"blocks.{block_idx}." |
|
) |
|
) |
|
] = state_dict.pop(key) |
|
new_keys_to_og_keys[new_key] = new_keys_to_og_keys.pop(key) |
|
|
|
if self.config.block_group_size > 1: |
|
|
|
for key in list(state_dict.keys()): |
|
if (m := re.match(r"transformer.blocks\.(\d+)\..*", key)) is not None: |
|
block_idx = int(m.group(1)) |
|
group_idx, group_block_idx = ( |
|
block_idx // self.config.block_group_size, |
|
block_idx % self.config.block_group_size, |
|
) |
|
state_dict[ |
|
( |
|
new_key := key.replace( |
|
f"blocks.{block_idx}.", f"block_groups.{group_idx}.{group_block_idx}." |
|
) |
|
) |
|
] = state_dict.pop(key) |
|
new_keys_to_og_keys[new_key] = new_keys_to_og_keys.pop(key) |
|
|
|
og_keys_to_new: Dict[str, Set[str]] = defaultdict(set) |
|
for new_key, og_key in new_keys_to_og_keys.items(): |
|
og_keys_to_new[og_key].add(new_key) |
|
|
|
return state_dict, og_keys_to_new |
|
|