# coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OPT model."""
import random
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.utils import (
    ContextManagers,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)

from m4.models import DecoupledEmbedding, DecoupledLinear
from m4.models.common import (
    expand_inputs_for_generation,
    prepare_inputs_for_generation,
    update_model_kwargs_for_generation,
)
from m4.models.custom_modules import VLOOMPreTrainedModelBase
from m4.models.perceiver.perceiver import PerceiverResampler
from m4.models.vopt.configuration_vopt import VOPTConfig
from m4.training.utils import (
    compute_perceiver_tflops_per_batch_per_gpu,
    compute_tflops_per_batch_per_gpu,
    deepspeed_gathered_parameters_context_manager,
    freeze_model,
)
from m4.utils import logging

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "VOPTConfig"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"

# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]

# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/opt-350m-dummy-sc"
_SEQ_CLASS_EXPECTED_LOSS = 1.71
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"

OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/opt-125m",
    "facebook/opt-350m",
    "facebook/opt-1.3b",
    "facebook/opt-2.7b",
    "facebook/opt-6.7b",
    "facebook/opt-13b",
    "facebook/opt-30b",
    # See all OPT models at https://huggingface.co/models?filter=opt
]


class SwiGLUActivation(nn.Module):
    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.gate = nn.Linear(in_features, out_features, bias=False)

    def forward(self, hidden_states_to_gate, hidden_states):
        gate = self.gate(hidden_states)
        return nn.functional.silu(gate) * hidden_states_to_gate
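
# Illustrative sketch (not part of the model): how `SwiGLUActivation` is meant to be called with
# two tensors -- `hidden_states_to_gate` (the fc1 output) and the pre-activation `hidden_states`
# that drives the gate. Shapes are assumptions for demonstration only.
def _swiglu_demo():
    embed_dim, ffn_dim = 8, 16
    act = SwiGLUActivation(embed_dim, ffn_dim)
    fc1 = nn.Linear(embed_dim, ffn_dim, bias=False)
    hidden_states = torch.randn(2, 5, embed_dim)        # (batch, seq_len, embed_dim)
    hidden_states_to_gate = fc1(hidden_states)          # (batch, seq_len, ffn_dim)
    gated = act(hidden_states_to_gate, hidden_states)   # silu(W_g x) * (W_1 x)
    assert gated.shape == (2, 5, ffn_dim)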

# Taken from LLaMA codebase
class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float()).type_as(x)
        return output * self.weight

def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
    """
    Make causal mask used for uni-directional (causal) self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
    mask_cond = torch.arange(mask.size(-1))
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
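
# Illustrative sketch (not part of the model): for a (batch, seq_len) of (1, 3) and no cached
# keys/values, `_make_causal_mask` produces a (1, 1, 3, 3) additive mask whose upper triangle
# holds the dtype's most negative value, so each position can only attend to itself and earlier
# positions once the mask is added to the attention scores.
def _causal_mask_demo():
    mask = _make_causal_mask(torch.Size([1, 3]), torch.float32)
    assert mask.shape == (1, 1, 3, 3)
    # Row i is 0 up to column i and ~-inf afterwards.
    assert mask[0, 0, 0, 1] == torch.finfo(torch.float32).min
    assert mask[0, 0, 2, 0] == 0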

def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
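
# Illustrative sketch (not part of the model): `_expand_mask` turns a binary padding mask
# (1 = keep, 0 = pad) into an additive mask, and the decoder later sums it with the causal mask,
# so a key position is blocked if it is either padded or in the future. Shapes are assumptions.
def _padding_plus_causal_mask_demo():
    padding_mask = torch.tensor([[1, 1, 0]])               # last token is padding
    additive_padding = _expand_mask(padding_mask, torch.float32)       # (1, 1, 3, 3)
    combined = _make_causal_mask(torch.Size([1, 3]), torch.float32) + additive_padding
    assert additive_padding[0, 0, 0, 0] == 0
    assert additive_padding[0, 0, 0, 2] == torch.finfo(torch.float32).min
    assert combined[0, 0, 1, 2] < 0   # padded and future -> masked
    assert combined[0, 0, 1, 0] == 0  # visible past token -> kept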

class OPTLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
        """`attention_mask` is expected to be [bsz x seqlen]."""
        attention_mask = attention_mask.long()

        # create positions depending on attention_mask
        positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1

        # cut positions if `past_key_values_length` is > 0
        positions = positions[:, past_key_values_length:]

        return super().forward(positions + self.offset)
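
# Illustrative sketch (not part of the model): positions are derived from the attention mask so
# that padded tokens do not advance the position counter. With the hard-coded offset of 2, real
# tokens get embedding ids 2, 3, 4, ... and padded ones map to id 1. Sizes are assumptions.
def _learned_position_demo():
    pos_emb = OPTLearnedPositionalEmbedding(num_embeddings=10, embedding_dim=4)
    attention_mask = torch.tensor([[1, 1, 0, 1]])
    positions = (torch.cumsum(attention_mask, dim=1) * attention_mask) - 1
    assert positions.tolist() == [[0, 1, -1, 2]]  # adding the offset of 2 gives ids [[2, 3, 1, 4]]
    out = pos_emb(attention_mask)
    assert out.shape == (1, 4, 4)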

class OPTAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_cross_attention=False,
        config=None,
        qk_layer_norms=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.is_cross_attention = is_cross_attention

        if self.is_cross_attention:
            kv_input_dim = self.embed_dim if not hasattr(config, "vision_embed_dim") else config.vision_embed_dim
            self.k_proj = nn.Linear(kv_input_dim, embed_dim, bias=bias)
            self.v_proj = nn.Linear(kv_input_dim, embed_dim, bias=bias)
            self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        else:
            self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
            self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
            self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

        self.qk_layer_norms = qk_layer_norms
        if self.qk_layer_norms and config.rms_norm:
            self.q_layer_norm = RMSNorm(self.head_dim, eps=1e-6)
            self.k_layer_norm = RMSNorm(self.head_dim, eps=1e-6)
        elif self.qk_layer_norms:
            self.q_layer_norm = nn.LayerNorm(self.head_dim)
            self.k_layer_norm = nn.LayerNorm(self.head_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = self.is_cross_attention or key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self._shape(self.q_proj(hidden_states), -1, bsz)
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        if self.qk_layer_norms:
            query_states = self.q_layer_norm(query_states)
            key_states = self.k_layer_norm(key_states)

        src_len = key_states.size(2)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attention_mask = attention_mask.expand(-1, self.num_heads, -1, -1)
            attention_mask = attention_mask + layer_head_mask.view(1, -1, 1, 1)

        attn_output = nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
        )

        attn_weights_reshaped = None
        logger.warning_once(
            "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
        )

        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
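
# Illustrative sketch (not part of the model): running the self-attention block on its own. The
# additive causal mask from `_make_causal_mask` is passed as `attention_mask`, and the returned
# `past_key_value` tuple is what gets cached for incremental decoding. Sizes are assumptions.
def _opt_attention_demo():
    attn = OPTAttention(embed_dim=16, num_heads=4, dropout=0.0, is_decoder=True)
    hidden_states = torch.randn(2, 5, 16)
    causal_mask = _make_causal_mask(torch.Size([2, 5]), hidden_states.dtype)  # (2, 1, 5, 5)
    attn_output, attn_weights, past_key_value = attn(hidden_states, attention_mask=causal_mask)
    assert attn_output.shape == (2, 5, 16)
    assert past_key_value[0].shape == (2, 4, 5, 4)  # (bsz, num_heads, seq_len, head_dim)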

class OPTDecoderLayer(nn.Module):
    def __init__(self, config: VOPTConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = OPTAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            config=config,
        )
        self.do_layer_norm_before = config.do_layer_norm_before
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim)
        self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        residual = hidden_states

        # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
        if self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # 350m applies layer norm AFTER attention
        if not self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Fully Connected
        hidden_states_shape = hidden_states.shape
        hidden_states = hidden_states.reshape(-1, hidden_states.size(-1))
        residual = hidden_states

        # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
        if self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)

        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)

        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        hidden_states = (residual + hidden_states).view(hidden_states_shape)

        # 350m applies layer norm AFTER attention
        if not self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

class VOPTGatedAttentionLayer(nn.Module):
    def __init__(self, config: VOPTConfig):
        """
        Note: Based on `tr_101_cm401xPMD09_nobias`, setting the biases to False in all of the nn.Linear for the gated
        cross attention. Provides a small stability gain at opt-13b scale.
        """
        super().__init__()
        self.embed_dim = config.hidden_size
        self.cross_attn = OPTAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            config=config,
            is_cross_attention=True,
            bias=False,
            qk_layer_norms=config.qk_layer_norms,
        )
        self.do_layer_norm_before = config.do_layer_norm_before
        self.normformer_layer_norms = config.normformer_layer_norms
        self.dropout = config.dropout
        if config.cross_layer_activation_function == "swiglu":
            # We cannot put `SwiGLUActivation` in `ACT2FN` because it takes two arguments (`in_features` and
            # `out_features`) that we don't know until entering this module.
            self.activation_fn = SwiGLUActivation(self.embed_dim, config.ffn_dim)
        else:
            self.activation_fn = ACT2FN[config.cross_layer_activation_function]

        if config.rms_norm:
            self.self_attn_layer_norm = RMSNorm(self.embed_dim, eps=1e-6)
        else:
            self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        if self.normformer_layer_norms:
            self.self_attn_post_layer_norm = nn.LayerNorm(self.embed_dim)

        self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False)
        self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False)

        if config.rms_norm:
            self.final_layer_norm = RMSNorm(self.embed_dim, eps=1e-6)
        else:
            self.final_layer_norm = nn.LayerNorm(self.embed_dim)
        if self.normformer_layer_norms:
            self.mlp_post_layer_norm = nn.LayerNorm(config.ffn_dim)

        self.act_cross_attn = nn.Tanh()
        self.act_dense = nn.Tanh()

        if config.alpha_initializer == "zeros":
            if config.alpha_type == "vector":
                self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
                self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
            elif config.alpha_type == "float":
                self.alpha_cross_attn = nn.Parameter(torch.zeros(1))
                self.alpha_dense = nn.Parameter(torch.zeros(1))
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
        elif config.alpha_initializer == "ones":
            if config.alpha_type == "vector":
                self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.embed_dim))
                self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.embed_dim))
            elif config.alpha_type == "float":
                self.alpha_cross_attn = nn.Parameter(torch.ones(1))
                self.alpha_dense = nn.Parameter(torch.ones(1))
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
        elif config.alpha_initializer in {"normal", "gaussian", "random"}:
            if config.alpha_type == "vector":
                self.alpha_cross_attn = nn.Parameter(
                    torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.embed_dim))
                )
                self.alpha_dense = nn.Parameter(
                    torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.embed_dim))
                )
            elif config.alpha_type == "float":
                self.alpha_cross_attn = nn.Parameter(
                    torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,))
                )
                self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,)))
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
        else:
            raise NotImplementedError(f"Alpha initialization scheme {config.alpha_initializer} not yet implemented!")

        assert hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        image_hidden_states: Optional[torch.Tensor] = None,
        image_attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if image_hidden_states is None:
            raise ValueError(
                "`image_hidden_states` is required for VOPT cross attention module which are visual features to be"
                " conditioned on."
            )

        if past_key_value is not None:
            raise NotImplementedError("Past key value states are not implemented for VOPT cross attention module.")

        residual = hidden_states

        # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
        if self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Cross Attention
        hidden_states, self_attn_weights, present_key_value = self.cross_attn(
            hidden_states=hidden_states,
            key_value_states=image_hidden_states,
            attention_mask=image_attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        if self.normformer_layer_norms:
            hidden_states = self.self_attn_post_layer_norm(hidden_states)
        hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states

        # 350m applies layer norm AFTER attention
        if not self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Fully Connected
        hidden_states_shape = hidden_states.shape
        hidden_states = hidden_states.reshape(-1, hidden_states.size(-1))
        residual = hidden_states

        # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
        if self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)

        hidden_states_to_gate = self.fc1(hidden_states)
        if isinstance(self.activation_fn, SwiGLUActivation):
            hidden_states = self.activation_fn(hidden_states_to_gate, hidden_states)
        else:
            hidden_states = self.activation_fn(hidden_states_to_gate)
        if self.normformer_layer_norms:
            hidden_states = self.mlp_post_layer_norm(hidden_states)

        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        hidden_states = (residual + self.act_dense(self.alpha_dense) * hidden_states).view(hidden_states_shape)

        # 350m applies layer norm AFTER attention
        if not self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
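
# Illustrative sketch (not part of the model): the residual merges above are gated by tanh(alpha).
# With `alpha_initializer="zeros"`, tanh(0) == 0, so at initialization the gated cross-attention
# and its feed-forward contribute nothing and the layer is an identity on the language stream;
# the gates open gradually as alpha is trained.
def _tanh_gate_demo():
    alpha = torch.zeros(1)                       # zero-initialized gate parameter
    gate = torch.tanh(alpha)
    cross_attn_out = torch.randn(2, 5, 16)
    residual = torch.randn(2, 5, 16)
    merged = residual + gate * cross_attn_out    # == residual exactly at init
    assert torch.equal(merged, residual)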

OPT_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`VOPTConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


class VOPTPreTrainedModel(VLOOMPreTrainedModelBase):
    config_class = VOPTConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["OPTDecoderLayer", "VOPTGatedAttentionLayer", "CLIPEncoderLayer"]
    _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]

    def _init_weights(self, module):
        def init_a_linear(module, mean=0.0, std=self.config.init_std):
            with ContextManagers(deepspeed_gathered_parameters_context_manager(module.weight, modify=True)):
                module.weight.data.normal_(mean=mean, std=std)
                if module.bias is not None:
                    with ContextManagers(deepspeed_gathered_parameters_context_manager(module.bias, modify=True)):
                        module.bias.data.zero_()

        if isinstance(module, VOPTGatedAttentionLayer):
            for sub_module_name, sub_module in module.named_modules():
                if isinstance(sub_module, nn.Linear):
                    if "fc2" in sub_module_name:
                        factor = 2 * self.config.num_hidden_layers
                    else:
                        factor = 1.0
                    init_a_linear(sub_module, std=(0.4 / (sub_module.in_features * factor)) ** 0.5)
        elif isinstance(module, PerceiverResampler):
            with ContextManagers(deepspeed_gathered_parameters_context_manager(module.latents, modify=True)):
                module.latents.data.normal_(mean=0.0, std=(1.0 / self.config.vision_embed_dim) ** 0.5)
            for sub_module_name, sub_module in module.named_modules():
                if isinstance(sub_module, nn.Linear):
                    if "c_proj" in sub_module_name:
                        factor = 2 * self.config.num_hidden_layers
                    else:
                        factor = 1.0
                    init_a_linear(sub_module, std=(0.4 / (self.config.vision_embed_dim * factor)) ** 0.5)
        elif isinstance(module, nn.Embedding):
            with ContextManagers(deepspeed_gathered_parameters_context_manager(module.weight, modify=True)):
                module.weight.data.normal_(mean=0.0, std=(1.0 / self.config.hidden_size) ** 0.5)
                if module.padding_idx is not None:
                    module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, DecoupledLinear):
            if hasattr(module, "additional_fc"):
                init_a_linear(module.additional_fc, std=(1.0 / (module.additional_fc.in_features)) ** 0.5)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (VOPTDecoder)):
            module.gradient_checkpointing = value

    @classmethod
    def override_vision_model_wrapper(cls, model, config, vision_model_name, vision_model_params, torch_dtype):
        # this can be called via from_pretrained from a class w/ head or w/o head so we extract the beheaded model version
        beheaded_model = model.model if hasattr(model, "model") else model
        cls.override_vision_model(beheaded_model.decoder, vision_model_name, vision_model_params, torch_dtype)
        beheaded_model.freeze_relevant_params(config)

OPT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.
            Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
            Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

class VOPTDecoder(VOPTPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`]

    Args:
        config: VOPTConfig
    """

    def __init__(self, config: VOPTConfig, vision_model=None):
        super().__init__(config)
        self.config = config
        self.dropout = config.dropout
        self.layerdrop = config.layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        self.vocab_size = config.vocab_size

        self.embed_tokens = DecoupledEmbedding(
            num_embeddings=config.vocab_size,
            num_additional_embeddings=config.additional_vocab_size,
            embedding_dim=config.word_embed_proj_dim,
            partially_freeze=config.freeze_text_layers,
            padding_idx=self.padding_idx,
        )
        self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)

        # Load an uninitialized model and later in from_pretrained will load the pre-trained model -
        # this avoids losing the weights of the main model in `from_pretrained`
        self.vision_model = vision_model

        # Perceiver Resampler
        if config.use_resampler:
            self.perceiver_resampler = PerceiverResampler(
                self.config,
                self.config.vision_embed_dim,
                config.resampler_depth,
                config.resampler_n_heads,
                config.resampler_head_dim,
                config.resampler_n_latents,
            )

        if config.word_embed_proj_dim != config.hidden_size:
            self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False)
        else:
            self.project_in = None

        self.layers = nn.ModuleList([OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)])

        self.cross_layer_interval = config.cross_layer_interval
        num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
        self.gated_cross_attn_layers = nn.ModuleList(
            [VOPTGatedAttentionLayer(config) for i in range(num_cross_layers)]
        )
        self.gradient_checkpointing = False

        # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
        # with checkpoints that have been fine-tuned before transformers v4.20.1
        # see https://github.com/facebookresearch/metaseq/pull/164
        if config.do_layer_norm_before and not config._remove_final_layer_norm:
            self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        else:
            self.final_layer_norm = None

        if config.word_embed_proj_dim != config.hidden_size:
            self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False)
        else:
            self.project_out = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(inputs_embeds.device)

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_embeddings: Optional[torch.FloatTensor] = None,
        image_attention_mask: Optional[torch.Tensor] = None,
        crossblock_head_mask: Optional[torch.Tensor] = None,  # TODO (ls): check if this is needed
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if pixel_values is not None and image_embeddings is not None:
            raise ValueError("You cannot specify both pixel_values and image_embeddings at the same time")
        elif pixel_values is not None:
            pixel_values = pixel_values.to(dtype=self.dtype, device=input_ids.device)  # fp16 compatibility
            batch_size, num_images = pixel_values.size(0), pixel_values.size(1)
            pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])
            # Get sequence from the vision encoder
            image_hidden_states = self.vision_model(pixel_values=pixel_values).last_hidden_state
        elif image_embeddings is not None:
            batch_size, num_images, image_seq_len, image_hidden_size = image_embeddings.size()
            image_hidden_states = image_embeddings.to(dtype=self.dtype, device=input_ids.device)
            image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size)

        if self.config.use_resampler:
            image_hidden_states = self.perceiver_resampler(image_hidden_states)
        image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2)
        image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size)
        # Make image_attention_mask compatible with hidden states
        text_seq_len = image_attention_mask.size(1)
        image_attention_mask = image_attention_mask.unsqueeze(-1)
        image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
        image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len)

        if image_hidden_states is not None:
            image_batch_size, image_sequence_length, _ = image_hidden_states.size()
            image_hidden_shape = (image_batch_size, image_sequence_length)
            if image_attention_mask is None:
                image_attention_mask = torch.ones(image_hidden_shape, device=device)
            image_attention_mask = self.invert_attention_mask(image_attention_mask)
        else:
            image_attention_mask = None

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device)
        pos_embeds = self.embed_positions(attention_mask, past_key_values_length)

        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        if self.project_in is not None:
            inputs_embeds = self.project_in(inputs_embeds)

        hidden_states = inputs_embeds + pos_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask], ["head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != (len(self.layers)):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {head_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None
            layer_head_mask = head_mask[idx] if head_mask is not None else None

            def vblock(
                main_block,
                hidden_states,
                attention_mask,
                layer_head_mask,
                past_key_value,
                image_hidden_states,
                image_attention_mask,
                output_attentions,
                use_cache,
                layer_idx,
                cross_layer_interval,
                gated_cross_attn_layers,
            ):
                # TODO(ls): Add cross attention values to respective lists
                if layer_idx % cross_layer_interval == 0:
                    xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
                    outputs = xblock(
                        hidden_states,
                        attention_mask=attention_mask,
                        layer_head_mask=layer_head_mask,
                        image_hidden_states=image_hidden_states,
                        image_attention_mask=image_attention_mask,
                        output_attentions=output_attentions,
                        use_cache=use_cache,
                        past_key_value=None,  # not implemented
                    )
                    hidden_states = outputs[0]

                layer_outputs = main_block(
                    hidden_states,
                    attention_mask=attention_mask,
                    layer_head_mask=layer_head_mask,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

                return layer_outputs

            if self.gradient_checkpointing and self.training:
                past_key_value = None
                if use_cache:
                    logger.warning_once(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    vblock,
                    decoder_layer,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    past_key_value,
                    image_hidden_states,
                    image_attention_mask,
                    output_attentions,
                    use_cache,
                    idx,
                    self.cross_layer_interval,
                    self.gated_cross_attn_layers,
                )
            else:
                layer_outputs = vblock(
                    decoder_layer,
                    hidden_states,
                    attention_mask=attention_mask,
                    layer_head_mask=layer_head_mask,
                    past_key_value=past_key_value,
                    image_hidden_states=image_hidden_states,
                    image_attention_mask=image_attention_mask,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    layer_idx=idx,
                    cross_layer_interval=self.cross_layer_interval,
                    gated_cross_attn_layers=self.gated_cross_attn_layers,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        if self.final_layer_norm is not None:
            hidden_states = self.final_layer_norm(hidden_states)

        if self.project_out is not None:
            hidden_states = self.project_out(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
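
# Illustrative sketch (not part of the model): with `cross_layer_interval = 4` and 12 decoder
# layers, the gated cross-attention runs before decoder layers 0, 4 and 8, and the block used at
# layer `layer_idx` is `gated_cross_attn_layers[layer_idx // cross_layer_interval]`, mirroring the
# indexing in `vblock` above. The numbers here are arbitrary assumptions.
def _cross_layer_schedule_demo():
    cross_layer_interval, num_hidden_layers = 4, 12
    schedule = [
        (layer_idx, layer_idx // cross_layer_interval)
        for layer_idx in range(num_hidden_layers)
        if layer_idx % cross_layer_interval == 0
    ]
    assert schedule == [(0, 0), (4, 1), (8, 2)]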

class VOPTModel(VOPTPreTrainedModel):
    def __init__(self, config: VOPTConfig, vision_model=None):
        super().__init__(config)
        self.decoder = VOPTDecoder(config, vision_model=vision_model)
        # Initialize weights and apply final processing
        self.post_init()

        self.freeze_relevant_params(config)

    def freeze_relevant_params(self, config=None):
        if config is None:
            config = self.config

        if config.freeze_text_layers:
            self.freeze_text_layers(config.freeze_text_module_exceptions)

        if config.freeze_vision_layers:
            freeze_model(self.decoder.vision_model, module_exceptions=config.freeze_vision_module_exceptions)

    def freeze_text_layers(self, module_exceptions):
        for module in [self.decoder.embed_positions, self.decoder.layers]:
            freeze_model(module, module_exceptions=module_exceptions)

        if self.decoder.project_out is not None:
            freeze_model(self.decoder.project_out, module_exceptions=module_exceptions)
        if self.decoder.final_layer_norm is not None:
            freeze_model(self.decoder.final_layer_norm, module_exceptions=module_exceptions)

    def get_input_embeddings(self):
        return self.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.decoder.embed_tokens = value

    def get_decoder(self):
        return self.decoder

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_embeddings: Optional[torch.FloatTensor] = None,
        image_attention_mask: Optional[torch.Tensor] = None,
        crossblock_head_mask: Optional[torch.Tensor] = None,  # TODO (ls): check if this is needed
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            image_embeddings=image_embeddings,
            image_attention_mask=image_attention_mask,
            crossblock_head_mask=crossblock_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs

        return BaseModelOutputWithPast(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            hidden_states=decoder_outputs.hidden_states,
            attentions=decoder_outputs.attentions,
        )

class VOPTForCausalLM(VOPTPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]

    def __init__(self, config, vision_model=None):
        super().__init__(config)
        # Initialize LM head first so that it is not directly offloaded to the CPU/disk
        # the lm_head weight is automatically tied to the embed tokens weight
        self.lm_head = DecoupledLinear(
            in_features=config.word_embed_proj_dim,
            out_features=config.vocab_size,
            out_additional_features=config.additional_vocab_size,
            bias=False,
            partially_freeze=config.freeze_lm_head,
        )
        self.model = VOPTModel(config, vision_model=vision_model)

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()

        if getattr(self.config, "tie_word_embeddings", True):
            output_embeddings.weight = input_embeddings.weight
            if input_embeddings.num_additional_embeddings > 0:
                assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings
                output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight

        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings
        if hasattr(output_embeddings, "out_additional_features") and hasattr(
            input_embeddings, "num_additional_embeddings"
        ):
            output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model.decoder = decoder

    def get_decoder(self):
        return self.model.decoder
| def forward( | |
| self, | |
| input_ids: torch.LongTensor = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| past_key_values: Optional[List[torch.FloatTensor]] = None, | |
| inputs_embeds: Optional[torch.FloatTensor] = None, | |
| pixel_values: Optional[torch.FloatTensor] = None, | |
| image_embeddings: Optional[torch.FloatTensor] = None, | |
| image_attention_mask: Optional[torch.Tensor] = None, | |
| crossblock_head_mask: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.LongTensor] = None, | |
| use_cache: Optional[bool] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple, CausalLMOutputWithPast]: | |
| r""" | |
| Args: | |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): | |
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you | |
| provide it. | |
| Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and | |
| [`PreTrainedTokenizer.__call__`] for details. | |
| [What are input IDs?](../glossary#input-ids) | |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): | |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: | |
| - 1 for tokens that are **not masked**, | |
| - 0 for tokens that are **masked**. | |
| [What are attention masks?](../glossary#attention-mask) | |
| head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): | |
| Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: | |
| - 1 indicates the head is **not masked**, | |
| - 0 indicates the head is **masked**. | |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): | |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of | |
| shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of | |
| shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional | |
| tensors are only required when the model is used as a decoder in a Sequence to Sequence model. | |
| Contains pre-computed hidden-states (key and values in the self-attention blocks and in the | |
| cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. | |
| If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those | |
| that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of | |
| all `decoder_input_ids` of shape `(batch_size, sequence_length)`. | |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): | |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. | |
| This is useful if you want more control over how to convert `input_ids` indices into associated vectors | |
| than the model's internal embedding lookup matrix. | |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., | |
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored | |
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. | |
| use_cache (`bool`, *optional*): | |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding | |
| (see `past_key_values`). | |
| output_attentions (`bool`, *optional*): | |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under | |
| returned tensors for more detail. | |
| output_hidden_states (`bool`, *optional*): | |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors | |
| for more detail. | |
| return_dict (`bool`, *optional*): | |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. | |
| Returns: | |
| Example: | |
| ```python | |
| >>> from transformers import GPT2Tokenizer, OPTForCausalLM | |
| >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m") | |
| >>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") | |
| >>> prompt = "Hey, are you conscious? Can you talk to me?" | |
| >>> inputs = tokenizer(prompt, return_tensors="pt") | |
| >>> # Generate | |
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30) | |
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] | |
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." | |
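| >>> # A hypothetical follow-up (not part of the original example): compute the | |
| >>> # language-modeling loss by reusing the input ids as labels; padding positions | |
| >>> # would normally be set to -100, as described in the `labels` docstring above. | |
| >>> outputs = model(**inputs, labels=inputs.input_ids) | |
| >>> loss = outputs.loss  # a scalar tensor | |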
| ```""" | |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions | |
| output_hidden_states = ( | |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states | |
| ) | |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
| # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn) | |
| outputs = self.model.decoder( | |
| input_ids=input_ids, | |
| attention_mask=attention_mask, | |
| head_mask=head_mask, | |
| past_key_values=past_key_values, | |
| inputs_embeds=inputs_embeds, | |
| pixel_values=pixel_values, | |
| image_embeddings=image_embeddings, | |
| image_attention_mask=image_attention_mask, | |
| crossblock_head_mask=crossblock_head_mask, | |
| use_cache=use_cache, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
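| # Project the decoder's final hidden states onto the vocabulary to obtain next-token logits. | |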
| logits = self.lm_head(outputs[0]).contiguous() | |
| loss = None | |
| if labels is not None: | |
| # Shift so that tokens < n predict n | |
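| # When an attention mask is provided, padding positions (mask == 0) are dropped from both | |
| # the shifted logits and the shifted labels so they do not contribute to the loss. | |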
| if attention_mask is not None: | |
| shift_attention_mask = attention_mask[..., 1:] | |
| shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous() | |
| shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous() | |
| else: | |
| shift_logits = logits[..., :-1, :].contiguous() | |
| shift_labels = labels[..., 1:].contiguous() | |
| # Flatten the tokens | |
| loss_fct = CrossEntropyLoss() | |
| loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) | |
| if not return_dict: | |
| output = (logits,) + outputs[1:] | |
| return (loss,) + output if loss is not None else output | |
| return CausalLMOutputWithPast( | |
| loss=loss, | |
| logits=logits, | |
| past_key_values=outputs.past_key_values, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |
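| # The helpers below adapt HuggingFace's generation utilities to this model by delegating to the | |
| # shared m4 implementations imported at the top of the file. | |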
| def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): | |
| inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) | |
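| # The forward signature has no position_ids or token_type_ids, so drop them if the shared | |
| # preparation helper added them. | |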
| unwanted_kwargs = ["position_ids", "token_type_ids"] | |
| for kwarg in unwanted_kwargs: | |
| inputs.pop(kwarg, None) | |
| return inputs | |
| @staticmethod | |
| def _expand_inputs_for_generation( | |
| *args, | |
| **model_kwargs, | |
| ): | |
| return expand_inputs_for_generation(*args, **model_kwargs) | |
| @staticmethod | |
| def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False): | |
| return update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder) | |
| @staticmethod | |
| def _reorder_cache(past, beam_idx): | |
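| # Beam search reorders the batch dimension at every step; the cached key/value tensors of each | |
| # layer are gathered along dim 0 with beam_idx so the cache follows the surviving beams. | |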
| reordered_past = () | |
| for layer_past in past: | |
| reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) | |
| return reordered_past | |
| def get_model_tflops_per_batch_per_gpu(self, hparams, data_param, tokenizer, max_num_images): | |
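| # Rough estimate of the TFLOPs spent on one batch per GPU, obtained by summing the language-model, | |
| # cross-attention, vision-backbone and (optional) perceiver-resampler contributions. | |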
| config_vl_model = self.config | |
| language_embed_size = config_vl_model.hidden_size | |
| num_language_layers = config_vl_model.num_hidden_layers | |
| ffn_inner_size = config_vl_model.ffn_dim | |
| vision_config = self.model.decoder.vision_model.config | |
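| # Composite vision configs (e.g. CLIP-style) nest the actual vision settings one level down. | |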
| if hasattr(vision_config, "vision_config"): | |
| vision_config = vision_config.vision_config | |
| # Get vision model blocks infos | |
| vision_patch_size = vision_config.patch_size | |
| vision_hidden_size = vision_config.hidden_size | |
| num_vision_layers = vision_config.num_hidden_layers | |
| # The +1 is for the CLS token | |
| single_image_seq_len = (vision_config.image_size // vision_patch_size) ** 2 + 1 | |
| vision_exp_factor = vision_config.intermediate_size // vision_hidden_size | |
| # Get language and cross-att blocks infos | |
| num_cross_attn_layers = num_language_layers // config_vl_model.cross_layer_interval | |
| language_seq_len = data_param.max_seq_len | |
| language_exp_factor = (ffn_inner_size // language_embed_size) if ffn_inner_size is not None else 4 | |
| cross_att_exp_factor = (ffn_inner_size // language_embed_size) if ffn_inner_size is not None else 4 | |
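| # Cross-attention keys/values come either from the perceiver latents (a fixed number per image) | |
| # or from every image patch, so their sequence length scales with max_num_images. | |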
| k_v_cross_attn_seq_len = ( | |
| (self.config.resampler_n_latents * max_num_images) | |
| if self.config.use_resampler | |
| else (single_image_seq_len * max_num_images) | |
| ) | |
| language_tflops_per_batch_per_gpu = compute_tflops_per_batch_per_gpu( | |
| num_layers=num_language_layers, | |
| batch_size=hparams.batch_size_per_gpu, | |
| q_seq_len=language_seq_len, | |
| k_seq_len=language_seq_len, | |
| hidden_size=language_embed_size, | |
| kv_in_dim=language_embed_size, | |
| ff_exp_factor=language_exp_factor, | |
| grad_acc_size=hparams.grad_acc_size, | |
| swiglu=False, | |
| vocab_size=tokenizer.vocab_size, | |
| count_backward=True, # Always True regardless of freezing, because gradients are computed for cross-attentions | |
| use_grad_checkpointing=hparams.gradient_checkpointing, | |
| ) | |
| cross_attention_tflops_per_batch_per_gpu = compute_tflops_per_batch_per_gpu( | |
| num_layers=num_cross_attn_layers, | |
| batch_size=hparams.batch_size_per_gpu, | |
| q_seq_len=language_seq_len, | |
| k_seq_len=k_v_cross_attn_seq_len, | |
| hidden_size=language_embed_size, | |
| kv_in_dim=vision_hidden_size, | |
| ff_exp_factor=cross_att_exp_factor, | |
| grad_acc_size=hparams.grad_acc_size, | |
| swiglu=self.config.cross_layer_activation_function == "swiglu", | |
| vocab_size=None, | |
| count_backward=True, | |
| use_grad_checkpointing=hparams.gradient_checkpointing, | |
| ) | |
| vision_tflops_per_batch_per_gpu = compute_tflops_per_batch_per_gpu( | |
| num_layers=num_vision_layers, | |
| batch_size=hparams.batch_size_per_gpu * max_num_images, | |
| q_seq_len=single_image_seq_len, | |
| k_seq_len=single_image_seq_len, | |
| hidden_size=vision_hidden_size, | |
| kv_in_dim=vision_hidden_size, | |
| ff_exp_factor=vision_exp_factor, | |
| grad_acc_size=hparams.grad_acc_size, | |
| swiglu=False, | |
| vocab_size=None, | |
| count_backward=not hparams.model_params["freeze_vision_layers"], | |
| use_grad_checkpointing=hparams.gradient_checkpointing, | |
| ) | |
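| # The perceiver resampler contributes a fourth FLOPs term only when it is enabled. | |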
| if self.config.use_resampler: | |
| perceiver_tflops_per_batch_per_gpu = compute_perceiver_tflops_per_batch_per_gpu( | |
| num_layers=self.config.resampler_depth, | |
| batch_size=hparams.batch_size_per_gpu * max_num_images, | |
| q_seq_len=self.config.resampler_n_latents, | |
| vision_embed_seq_len=single_image_seq_len, | |
| q_k_v_input_dim=vision_hidden_size, | |
| attention_hidden_size=self.config.resampler_n_heads * self.config.resampler_head_dim, | |
| ff_exp_factor=cross_att_exp_factor, | |
| count_backward=True, | |
| use_grad_checkpointing=hparams.gradient_checkpointing, | |
| ) | |
| flop_count = ( | |
| language_tflops_per_batch_per_gpu | |
| + cross_attention_tflops_per_batch_per_gpu | |
| + vision_tflops_per_batch_per_gpu | |
| + perceiver_tflops_per_batch_per_gpu | |
| ) | |
| else: | |
| flop_count = ( | |
| language_tflops_per_batch_per_gpu | |
| + cross_attention_tflops_per_batch_per_gpu | |
| + vision_tflops_per_batch_per_gpu | |
| ) | |
| return flop_count | |