# Excerpt from transformers/src/transformers/generation/tf_utils.py

        # 10. go into different generation modes
        if is_greedy_gen_mode:
            if generation_config.num_return_sequences > 1:
                raise ValueError(
                    f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
                    " greedy search."
                )
            # 11. run greedy search
            return self.greedy_search(
                input_ids,
                max_length=generation_config.max_length,
                pad_token_id=generation_config.pad_token_id,
                eos_token_id=generation_config.eos_token_id,
                logits_processor=logits_processor,
                output_scores=generation_config.output_scores,
                return_dict_in_generate=generation_config.return_dict_in_generate,
                **model_kwargs,
            )
        elif is_contrastive_search_gen_mode:
            if generation_config.num_return_sequences > 1:
                raise ValueError(
                    f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
                    " contrastive search."
                )
            # 11. run contrastive search
            return self.contrastive_search(
                input_ids,
                top_k=generation_config.top_k,
                penalty_alpha=generation_config.penalty_alpha,
                logits_processor=logits_processor,
                max_length=generation_config.max_length,
                pad_token_id=generation_config.pad_token_id,
                eos_token_id=generation_config.eos_token_id,
                output_scores=generation_config.output_scores,
                return_dict_in_generate=generation_config.return_dict_in_generate,
                **model_kwargs,
            )
        elif is_sample_gen_mode:
            # 11. prepare logits warper
            logits_warper = self._get_logits_warper(generation_config=generation_config)

            # 12. expand input_ids with `num_return_sequences` additional sequences per batch
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_return_sequences,
                is_encoder_decoder=self.config.is_encoder_decoder,
                **model_kwargs,
            )

            # 13. run sample
            return self.sample(
                input_ids,
                logits_processor=logits_processor,
                logits_warper=logits_warper,
                max_length=generation_config.max_length,
                pad_token_id=generation_config.pad_token_id,
                eos_token_id=generation_config.eos_token_id,
                seed=seed,
                output_scores=generation_config.output_scores,
                return_dict_in_generate=generation_config.return_dict_in_generate,
                **model_kwargs,
            )
        elif is_beam_gen_mode:
            if generation_config.num_beams < generation_config.num_return_sequences:
                raise ValueError(
                    "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
                    f" num_return_sequences, got {generation_config.num_beams} and"
                    f" {generation_config.num_return_sequences} (respectively)"
                )

            # 11. broadcast inputs to the desired number of beams
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_beams,
                is_encoder_decoder=self.config.is_encoder_decoder,
                expand_in_new_axis=True,
                **model_kwargs,
            )

            # 12. run beam search
            return self.beam_search(
                input_ids,
                max_length=generation_config.max_length,
                pad_token_id=generation_config.pad_token_id,
                eos_token_id=generation_config.eos_token_id,
                length_penalty=generation_config.length_penalty,
                early_stopping=generation_config.early_stopping,
                logits_processor=logits_processor,
                output_scores=generation_config.output_scores,
                return_dict_in_generate=generation_config.return_dict_in_generate,
                num_return_sequences=generation_config.num_return_sequences,
                **model_kwargs,
            )
        elif is_beam_sample_gen_mode:
            if generation_config.num_beams < generation_config.num_return_sequences:
                raise ValueError(
                    "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
                    f" num_return_sequences, got {generation_config.num_beams} and"
                    f" {generation_config.num_return_sequences} (respectively)"
                )

            # 11. prepare logits warper
            logits_warper = self._get_logits_warper(generation_config=generation_config)

            # 12. broadcast inputs to the desired number of beams
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_beams,
                is_encoder_decoder=self.config.is_encoder_decoder,
                expand_in_new_axis=True,
                **model_kwargs,
            )

            # 13. run beam sample (beam search with sampling)
            return self.beam_search(
                input_ids,
                do_sample=True,
                max_length=generation_config.max_length,
                pad_token_id=generation_config.pad_token_id,
                eos_token_id=generation_config.eos_token_id,
                length_penalty=generation_config.length_penalty,
                early_stopping=generation_config.early_stopping,
                logits_processor=logits_processor,
                logits_warper=logits_warper,
                output_scores=generation_config.output_scores,
                return_dict_in_generate=generation_config.return_dict_in_generate,
                num_return_sequences=generation_config.num_return_sequences,
                **model_kwargs,
            )
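
        # Dispatch sketch (informal note, not in the original file): the mode flags above are
        # mutually exclusive. Roughly: num_beams == 1 with do_sample=False routes to greedy_search
        # (or to contrastive_search when penalty_alpha/top_k request it), num_beams == 1 with
        # do_sample=True routes to sample, and num_beams > 1 routes to beam_search, with do_sample
        # toggling beam sampling.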

    def _prepare_attention_mask_for_generation(
        self,
        inputs: tf.Tensor,
        pad_token_id: Optional[int],
        eos_token_id: Optional[int],
    ) -> tf.Tensor:
        is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64)
        is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id)
        is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id)

        # Check if input is input_ids and padded -> only then is attention_mask defined
        if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
            return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32)
        else:
            return tf.ones(inputs.shape[:2], dtype=tf.int32)
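
    # Illustrative sketch (not part of the original file; values assumed): with pad_token_id=0 and
    # eos_token_id=2, a padded batch of input_ids yields a mask that zeroes out the pad positions:
    #
    #     inputs = tf.constant([[5, 6, 7], [5, 6, 0]])
    #     mask = model._prepare_attention_mask_for_generation(inputs, pad_token_id=0, eos_token_id=2)
    #     # -> [[1, 1, 1], [1, 1, 0]]
    #
    # When pad_token_id equals eos_token_id, trailing EOS tokens would be indistinguishable from
    # padding, so the method falls back to an all-ones mask.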

    def _prepare_encoder_decoder_kwargs_for_generation(
        self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None
    ) -> Dict[str, Any]:
        # 1. get encoder and store encoder outputs
        encoder = self.get_encoder()

        # 2. prepare encoder args and encoder kwargs from model kwargs
        irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
        encoder_kwargs = {
            argument: value
            for argument, value in model_kwargs.items()
            if not any(argument.startswith(p) for p in irrelevant_prefix)
        }
        encoder_signature = set(inspect.signature(encoder.call).parameters)
        encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
        if not encoder_accepts_wildcard:
            encoder_kwargs = {
                argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
            }

        # 3. vision models don't use `attention_mask`.
        encoder_kwargs["return_dict"] = True
        encoder_kwargs[model_input_name] = inputs_tensor
        if model_input_name != self.main_input_name:  # in Keras, the first input must always be passed
            encoder_kwargs[self.main_input_name] = None
        encoder_outputs = encoder(**encoder_kwargs)
        model_kwargs["encoder_outputs"] = encoder_outputs

        return model_kwargs

    def _prepare_decoder_input_ids_for_generation(
        self,
        batch_size: int,
        model_input_name: str,
        model_kwargs: Dict[str, tf.Tensor],
        decoder_start_token_id: int = None,
        bos_token_id: int = None,
    ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]:
        """Prepares `decoder_input_ids` for generation with encoder-decoder models"""
        # 1. Check whether the user has defined `decoder_input_ids` manually. For flexibility in input naming, we
        # also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
        if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
            decoder_input_ids = model_kwargs.pop("decoder_input_ids")
        elif "input_ids" in model_kwargs and model_input_name != "input_ids":
            decoder_input_ids = model_kwargs.pop("input_ids")
        else:
            decoder_input_ids = None

        # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
        decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
        decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id

        # no user input -> use decoder_start_token_id as decoder_input_ids
        if decoder_input_ids is None:
            decoder_input_ids = decoder_input_ids_start
        # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
        # decoder_attention_mask if provided)
        elif tf.reduce_all(decoder_input_ids[:, 0] != decoder_start_token_id):
            decoder_input_ids = tf.concat([decoder_input_ids_start, decoder_input_ids], axis=-1)
            if "decoder_attention_mask" in model_kwargs:
                decoder_attention_mask = model_kwargs["decoder_attention_mask"]
                decoder_attention_mask = tf.concat(
                    (tf.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
                    axis=-1,
                )
                model_kwargs["decoder_attention_mask"] = decoder_attention_mask

        return decoder_input_ids, model_kwargs
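
    # Illustrative sketch (hypothetical values): for an encoder-decoder model with
    # decoder_start_token_id=0, user-supplied decoder inputs that do not already begin with that
    # token are prepended with it:
    #
    #     decoder_input_ids = tf.constant([[5, 6]])
    #     # after the method above: [[0, 5, 6]], and any `decoder_attention_mask` gains a leading 1.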

    def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
        # retrieve decoder_start_token_id for encoder-decoder models
        # fall back to bos_token_id if necessary
        decoder_start_token_id = (
            decoder_start_token_id
            if decoder_start_token_id is not None
            else self.generation_config.decoder_start_token_id
        )
        bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id

        if decoder_start_token_id is not None:
            return decoder_start_token_id
        elif bos_token_id is not None:
            return bos_token_id
        raise ValueError(
            "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
        )

    @staticmethod
    def _expand_inputs_for_generation(
        expand_size: int = 1,
        is_encoder_decoder: bool = False,
        input_ids: Optional[tf.Tensor] = None,
        expand_in_new_axis: bool = False,
        **model_kwargs,
    ) -> Tuple[tf.Tensor, Dict[str, Any]]:
        """
        Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...],
        depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with
        `expand_in_new_axis=True`
        """

        def _expand_tensor(tensor: tf.Tensor):
            if expand_in_new_axis:
                shape = shape_list(tensor)
                return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:]))
            else:
                return tf.repeat(tensor, expand_size, axis=0)

        def _expand_dict_for_generation(dict_to_expand):
            for key in dict_to_expand:
                if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor):
                    dict_to_expand[key] = _expand_tensor(dict_to_expand[key])
            return dict_to_expand

        if input_ids is not None:
            input_ids = _expand_tensor(input_ids)

        model_kwargs = _expand_dict_for_generation(model_kwargs)
        if is_encoder_decoder:
            if model_kwargs.get("encoder_outputs") is None:
                raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
            model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])

        return input_ids, model_kwargs
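
    # Shape sketch (assumed batch_size=2, expand_size=3):
    #     tf.repeat(...)                          [2, seq_len] -> [6, seq_len]      (sampling path)
    #     tf.broadcast_to(tensor[:, None], ...)   [2, seq_len] -> [2, 3, seq_len]   (beam path, expand_in_new_axis=True)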

    def _prepare_model_inputs(
        self,
        inputs: Optional[tf.Tensor] = None,
        bos_token_id: Optional[int] = None,
        model_kwargs: Optional[Dict[str, tf.Tensor]] = None,
    ) -> Tuple[tf.Tensor, Optional[str], Dict[str, tf.Tensor]]:
        """
        This function extracts the model-specific `inputs` for generation.
        """
        # 1. retrieve all kwargs that are non-None or non-model input related.
        # some encoder-decoder models have different names for model and encoder
        if (
            self.config.is_encoder_decoder
            and hasattr(self, "encoder")
            and hasattr(self.encoder, "main_input_name")
            and self.encoder.main_input_name != self.main_input_name
        ):
            input_name = self.encoder.main_input_name
        else:
            input_name = self.main_input_name

        model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}

        # 2. check whether model_input_name is passed as kwarg
        # if yes and `inputs` is None use kwarg inputs
        inputs_kwarg = model_kwargs.pop(input_name, None)
        if inputs_kwarg is not None and inputs is not None:
            raise ValueError(
                f"`inputs`: {inputs} were passed alongside {input_name} which is not allowed. "
                f"Make sure to either pass {inputs} or {input_name}=..."
            )
        elif inputs_kwarg is not None:
            inputs = inputs_kwarg

        # 3. In the presence of `inputs_embeds` for text models:
        # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
        #   doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
        #   input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
        # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
        #   pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
        if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
            if not self.config.is_encoder_decoder:
                has_inputs_embeds_forwarding = "inputs_embeds" in set(
                    inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
                )
                if not has_inputs_embeds_forwarding:
                    raise ValueError(
                        f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
                        "doesn't have its forwarding implemented. See the GPT2 implementation for an example "
                        "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
                    )
                # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
                # the attention mask) can rely on the actual model input.
                model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
                    inputs, bos_token_id, model_kwargs=model_kwargs
                )
            else:
                if inputs is not None:
                    raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
                inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"

        # 4. if `inputs` is still None, try to create `input_ids` from BOS token
        inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)

        return inputs, input_name, model_kwargs

    def _maybe_initialize_input_ids_for_generation(
        self,
        inputs: Optional[tf.Tensor] = None,
        bos_token_id: Optional[int] = None,
        model_kwargs: Optional[Dict[str, tf.Tensor]] = None,
    ) -> tf.Tensor:
        """Initializes input ids for generation, if necessary."""
        if inputs is not None:
            return inputs

        encoder_outputs = model_kwargs.get("encoder_outputs")
        if self.config.is_encoder_decoder and encoder_outputs is not None:
            # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
            shape = encoder_outputs.last_hidden_state.shape[:-1]
            return tf.ones(shape, dtype=tf.int32) * -100

        if bos_token_id is None:
            raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")

        # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
        # soft-prompting or in multimodal implementations built on top of decoder-only language models.
        batch_size = 1
        for value in model_kwargs.values():
            if isinstance(value, tf.Tensor):
                batch_size = value.shape[0]
                break
        return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id

    @staticmethod
    def _extract_past_from_model_output(outputs: ModelOutput):
        past_key_values = None
        if "past_key_values" in outputs:
            past_key_values = outputs.past_key_values
        elif "mems" in outputs:
            past_key_values = outputs.mems
        elif "past_buckets_states" in outputs:
            past_key_values = outputs.past_buckets_states
        return past_key_values

    def _update_model_kwargs_for_generation(
        self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False
    ) -> Dict[str, Any]:
        # update past_key_values
        model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs)

        # update attention mask
        if not is_encoder_decoder:
            if "attention_mask" in model_kwargs:
                attention_mask = model_kwargs["attention_mask"]
                model_kwargs["attention_mask"] = tf.concat(
                    [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1
                )

        return model_kwargs

    def _update_model_kwargs_for_xla_generation(
        self,
        model_outputs: ModelOutput,
        model_kwargs: Dict[str, Any],
        cur_len: int,
        max_length: int,
        batch_size: int,
        is_encoder_decoder: bool = False,
        batch_axis: int = 0,
    ):
        def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder):
            """initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
            if is_encoder_decoder:
                # One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values
                # tensor, 1s for the actual input_ids
                decoder_attention_mask = tf.concat(
                    [
                        tf.ones((batch_size, 1), dtype=tf.int32),
                        tf.zeros((batch_size, num_padding_values), dtype=tf.int32),
                        tf.ones((batch_size, 1), dtype=tf.int32),
                    ],
                    axis=1,
                )
                mask = {"decoder_attention_mask": decoder_attention_mask}
            else:
                attention_mask = model_kwargs.pop("attention_mask")
                # 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids
                attention_mask = tf.concat(
                    [
                        attention_mask,
                        tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype),
                        tf.ones((batch_size, 1), dtype=attention_mask.dtype),
                    ],
                    axis=1,
                )
                mask = {"attention_mask": attention_mask}
            return mask

        def _update_attention(model_kwargs, new_past_index, is_encoder_decoder):
            """updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
            update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index
            if is_encoder_decoder:
                decoder_attention_mask = model_kwargs.pop("decoder_attention_mask")
                decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype)
                decoder_attention_mask = dynamic_update_slice(
                    decoder_attention_mask, decoder_attention_mask_update_slice, update_start
                )
                mask = {"decoder_attention_mask": decoder_attention_mask}
            else:
                attention_mask = model_kwargs.pop("attention_mask")
                attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype)
                attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start)
                mask = {"attention_mask": attention_mask}
            return mask

        def _initialize_past(past_key_values, num_padding_values, batch_axis):
            """initialize past_key_values with zeros -- the structure depends on `batch_axis`"""
            if batch_axis == 0:
                padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32)
                new_past = ()
                for past_layer in past_key_values:
                    new_past_layer = list(past_layer)
                    for i in range(len(new_past_layer[:2])):
                        new_past_layer[i] = tf.pad(past_layer[i], padding_values)
                    new_past += (tuple(new_past_layer),)
            else:
                padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2))
                new_past = list(past_key_values)
                for i in range(len(past_key_values)):
                    new_past[i] = tf.pad(past_key_values[i], padding_values)
            return new_past

        def _update_past(past_key_values, new_past_index, batch_axis):
            if batch_axis == 0:
                slice_start_base = tf.constant([0, 0, 1, 0])
                new_past = ()
                for past_layer in past_key_values:
                    new_past_layer = list(past_layer)
                    for i in range(len(new_past_layer[:2])):
                        update_slice = past_layer[i][:, :, -1:]
                        # Write the last slice to the first open location in the padded past_key_values array
                        # and then truncate the last slice off the array
                        new_past_layer[i] = dynamic_update_slice(
                            past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index
                        )
                    new_past += (tuple(new_past_layer),)
            else:
                slice_start_base = tf.constant([0, 0, 0, 1, 0])
                new_past = [None for _ in range(len(past_key_values))]
                for i in range(len(past_key_values)):
                    update_slice = past_key_values[i][:, :, :, -1:]
                    # Write the last slice to the first open location in the padded past_key_values array
                    # and then truncate the last slice off the array
                    new_past[i] = dynamic_update_slice(
                        past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index
                    )
            return new_past

        past_key_values = self._extract_past_from_model_output(model_outputs)
        if past_key_values is None:
            raise ValueError(
                "No known `past_key_values` variable found in model outputs (model outputs keys:"
                f" {list(model_outputs.keys())})"
            )
        is_past_initialized = model_kwargs.pop("past_key_values", None) is not None

        if not is_past_initialized:
            # The padded version of `past_key_values` has a length of `max_length - 1`, as `past_key_values` holds
            # information relative to previous autoregressive generation steps (step 0 has no past_key_values, step 1
            # has 1 past_key_values value, ..., the last step has `max_length - 1` past_key_values values).
            num_padding_values = max_length - cur_len - 1
            mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder)
            new_past = _initialize_past(past_key_values, num_padding_values, batch_axis)
        else:
            # The new index of past_key_values to be filled corresponds to the current length of the sequence, with
            # two subtractions: -1 because past_key_values holds information regarding previous generation steps
            # (read comment above) and -1 again because in an array the index is the length of the array minus 1.
            new_past_index = cur_len - 2
            mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder)
            new_past = _update_past(past_key_values, new_past_index, batch_axis)
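
        # Worked example of the index arithmetic above (hypothetical numbers): with cur_len=5 at
        # update time, the cache slot to fill is new_past_index = 5 - 2 = 3 -- one subtraction
        # because past_key_values lags the current sequence by one step, and one more to turn a
        # length into a 0-based index.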

        # sets the updated variables (mask and past_key_values)
        model_kwargs.update(mask)
        model_kwargs["past_key_values"] = tuple(new_past)

        return model_kwargs

    def _get_logits_warper(
        self,
        generation_config: GenerationConfig,
    ) -> TFLogitsProcessorList:
        """
        This method returns a [`TFLogitsProcessorList`] object that contains all relevant [`TFLogitsWarper`]
        instances used for multinomial sampling.
        """

        # instantiate warpers list
        warpers = TFLogitsProcessorList()

        # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a
        # better score (i.e. keep len(generation_config.eos_token_id) + 1)
        if generation_config.num_beams > 1:
            if isinstance(generation_config.eos_token_id, list):
                min_tokens_to_keep = len(generation_config.eos_token_id) + 1
            else:
                min_tokens_to_keep = 2
        else:
            min_tokens_to_keep = 1

        if generation_config.temperature is not None and generation_config.temperature != 1.0:
            warpers.append(TFTemperatureLogitsWarper(generation_config.temperature))
        if generation_config.top_k is not None and generation_config.top_k != 0:
            warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep))
        if generation_config.top_p is not None and generation_config.top_p < 1.0:
            warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep))
        return warpers
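
    # Sketch of a resulting warper chain (assumed config values): with temperature=0.7, top_k=50 and
    # top_p=0.95 on a single beam, the warpers apply in insertion order:
    #     TFTemperatureLogitsWarper(0.7) -> TFTopKLogitsWarper(top_k=50, min_tokens_to_keep=1)
    #     -> TFTopPLogitsWarper(top_p=0.95, min_tokens_to_keep=1)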

    def _get_logits_processor(
        self,
        generation_config: GenerationConfig,
        input_ids_seq_length: int,
        logits_processor: Optional[TFLogitsProcessorList],
    ) -> TFLogitsProcessorList:
        """
        This method returns a [`TFLogitsProcessorList`] object that contains all relevant [`TFLogitsProcessor`]
        instances used to modify the scores of the language model head.
        """
        processors = TFLogitsProcessorList()

        # instantiate processors list
        if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
            processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty))
        if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
            processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
        if generation_config.bad_words_ids is not None:
            processors.append(
                TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id)
            )
        if (
            generation_config.min_length is not None
            and generation_config.eos_token_id is not None
            and generation_config.min_length > 0
        ):
            processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id))
        if generation_config.forced_bos_token_id is not None:
            processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
        if generation_config.forced_eos_token_id is not None:
            processors.append(
                TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
            )
        if generation_config.suppress_tokens is not None:
            processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
        if generation_config.begin_suppress_tokens is not None:
            begin_index = input_ids_seq_length
            begin_index = (
                begin_index
                if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
                else begin_index + 1
            )
            if generation_config.forced_decoder_ids is not None:
                begin_index += generation_config.forced_decoder_ids[-1][
                    0
                ]  # generation starts after the last token that is forced
            processors.append(
                TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
            )
        if generation_config.forced_decoder_ids is not None:
            processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids))

        processors = self._merge_criteria_processor_list(processors, logits_processor)
        return processors

    def _merge_criteria_processor_list(
        self,
        default_list: TFLogitsProcessorList,
        custom_list: TFLogitsProcessorList,
    ) -> TFLogitsProcessorList:
        if len(custom_list) == 0:
            return default_list
        for default in default_list:
            for custom in custom_list:
                if type(custom) is type(default):
                    object_type = "logits processor"
                    raise ValueError(
                        f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
                        f" `generate`, but it has already been created with the values {default}. {default} has been"
                        " created by passing the corresponding arguments to generate or by the model's config default"
                        f" values. If you just want to change the default values of {object_type} consider passing"
                        f" them as arguments to `generate` instead of using a custom {object_type}."
                    )
        default_list.extend(custom_list)
        return default_list
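
    # Usage sketch (hypothetical scenario): if a caller passes a custom TFMinLengthLogitsProcessor
    # via `logits_processor` while `generation_config.min_length` is also set, the default list
    # already contains a processor of that type, so the type check above raises instead of silently
    # applying the processor twice.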

    def greedy_search(
        self,
        input_ids: tf.Tensor,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        logits_processor: Optional[TFLogitsProcessorList] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_scores: Optional[bool] = None,
        return_dict_in_generate: Optional[bool] = None,
        **model_kwargs,
    ) -> Union[TFGreedySearchOutput, tf.Tensor]:
        r"""
        Generates sequences for models with a language modeling head using greedy decoding.

        Parameters:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            logits_processor (`TFLogitsProcessorList`, *optional*):
                An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
                used to modify the prediction scores of the language modeling head applied at each generation step.
            max_length (`int`, *optional*, defaults to 20):
                The maximum length of the sequence to be generated.
            pad_token_id (`int`, *optional*):
                The id of the *padding* token.
            eos_token_id (`Union[int, List[int]]`, *optional*):
                The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            output_scores (`bool`, *optional*, defaults to `False`):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            model_kwargs:
                Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
                model is an encoder-decoder model the kwargs should include `encoder_outputs`.

        Return:
            [`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or
            `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
            [`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
            `return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if
            `model.config.is_encoder_decoder=True`.

        Examples:

        ```python
        >>> from transformers import (
        ...     AutoTokenizer,
        ...     TFAutoModelForCausalLM,
        ...     TFLogitsProcessorList,
        ...     TFMinLengthLogitsProcessor,
        ... )

        >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")

        >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
        >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id

        >>> input_prompt = "Today is a beautiful day, and"
        >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids

        >>> # instantiate logits processors
        >>> logits_processor = TFLogitsProcessorList(
        ...     [
        ...         TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
        ...     ]
        ... )

        >>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor)
        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
        ["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"]
        ```"""

        # 1. init greedy_search values
        logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()

        max_length = max_length if max_length is not None else self.generation_config.max_length
        pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]
        output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
        output_attentions = (
            output_attentions if output_attentions is not None else self.generation_config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
        )
        return_dict_in_generate = (
            return_dict_in_generate
            if return_dict_in_generate is not None
            else self.generation_config.return_dict_in_generate
        )

        use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
        use_xla = not tf.executing_eagerly()
        # TODO (Joao): fix cache format or find programmatic way to detect cache index
        # GPT2 and other models have a slightly different cache structure, with a different batch axis
        model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
        cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
        # some models, like XLNet, need more than the last token in the presence of past_key_values
        needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())

        # 2. init `attentions`, `hidden_states`, and `scores` tuples
        scores = [] if (return_dict_in_generate and output_scores) else None
        decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
        cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
        decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None

        # 3. init tensors to use for "xla-compilable" generate function
        batch_size, cur_len = shape_list(input_ids)

        # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
        input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
        generated = tf.concat([input_ids, input_ids_padding], axis=-1)
        finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)

        # 4. define "xla-compilable" stop condition and auto-regressive function
        # define condition fn
        def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
            """state termination condition fn."""
            return ~tf.reduce_all(finished_sequences)

        # define body fn
        def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs):
            """state update fn."""
            if model_kwargs.get("past_key_values") is None or needs_full_input:
                input_ids = generated[:, :cur_len]
            else:
                input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
            model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
            # forward pass to get next token logits
            model_outputs = self(
                **model_inputs,
                return_dict=True,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )
            next_token_logits = model_outputs.logits[:, -1]

            # pre-process distribution
            next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)

            # Store scores, attentions and hidden_states when required
            if not use_xla and return_dict_in_generate:
                if output_scores:
                    scores.append(next_tokens_scores)
                if output_attentions and self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.decoder_attentions)
                    cross_attentions.append(model_outputs.cross_attentions)
                elif output_attentions and not self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.attentions)

                if output_hidden_states and self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.decoder_hidden_states)
                elif output_hidden_states and not self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.hidden_states)

            # argmax
            next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32)

            if eos_token_id is not None:
                if pad_token_id is None:
                    raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
                unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
                next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
                next_token_is_eos = tf.math.reduce_any(
                    tf.equal(
                        tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
                    ),
                    axis=0,
                )
                finished_sequences = finished_sequences | next_token_is_eos

            # update `generated` and `cur_len`
            update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
            generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
            cur_len += 1
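
            # Sketch of the scatter update above (assumed batch_size=2, cur_len=4): update_indices is
            # [[0, 4], [1, 4]], so tensor_scatter_nd_update writes next_tokens[i] into generated[i, 4]
            # for each row while keeping `generated` at a fixed shape (XLA-friendly).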

            # update model_kwargs
            if use_xla:
                model_kwargs = self._update_model_kwargs_for_xla_generation(
                    model_outputs=model_outputs,
                    model_kwargs=model_kwargs,
                    cur_len=cur_len,
                    max_length=max_length,
                    batch_size=batch_size,
                    is_encoder_decoder=self.config.is_encoder_decoder,
                    batch_axis=cache_batch_axis,
                )
            else:
                model_kwargs = self._update_model_kwargs_for_generation(
                    model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
                )
                # if we don't cache past key values we need the whole input
                if model_kwargs.get("past_key_values", None) is None:
                    # let's throw out `past_key_values` since we don't want `None` tensors
                    model_kwargs.pop("past_key_values", None)

            return generated, finished_sequences, cur_len, model_kwargs

        # 5. run generation
        # 1st generation step has to be run before to initialize `past_key_values`
        generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn(
            generated, finished_sequences, cur_len, model_kwargs
        )

        # 2-to-n generation steps can then be run in autoregressive fashion
        # only in case 1st generation step does NOT yield EOS token though
        maximum_iterations = max_length - cur_len
        generated, _, cur_len, _ = tf.while_loop(
            greedy_search_cond_fn,
            greedy_search_body_fn,
            (generated, finished_sequences, cur_len, model_kwargs),
            maximum_iterations=maximum_iterations,
        )

        # 6. prepare outputs
        if not use_xla:
            # cut for backward compatibility
            generated = generated[:, :cur_len]
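
        # Note on the loop contract above: tf.while_loop re-invokes greedy_search_body_fn while
        # greedy_search_cond_fn returns True, carrying (generated, finished_sequences, cur_len,
        # model_kwargs) as loop variables; both functions must preserve the structure and dtypes of
        # these variables for the loop to be traceable and compilable.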

        if return_dict_in_generate:
            if self.config.is_encoder_decoder:
                # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
                encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
                encoder_hidden_states = (
                    model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
                )

                scores = tuple(scores) if scores is not None else None
                decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
                cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
                decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None

                return TFGreedySearchEncoderDecoderOutput(
                    sequences=generated,
                    scores=scores,
                    encoder_attentions=encoder_attentions,
                    encoder_hidden_states=encoder_hidden_states,
                    decoder_attentions=decoder_attentions,
                    cross_attentions=cross_attentions,
                    decoder_hidden_states=decoder_hidden_states,
                )
            else:
                return TFGreedySearchDecoderOnlyOutput(
                    sequences=generated,
                    scores=scores,
                    attentions=decoder_attentions,
                    hidden_states=decoder_hidden_states,
                )
        else:
            return generated

    def sample(
        self,
        input_ids: tf.Tensor,
        logits_processor: Optional[TFLogitsProcessorList] = None,
        logits_warper: Optional[TFLogitsProcessorList] = None,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        seed: Optional[Tuple[int, int]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_scores: Optional[bool] = None,
        return_dict_in_generate: Optional[bool] = None,
        **model_kwargs,
    ) -> Union[TFSampleOutput, tf.Tensor]:
        r"""
        Generates sequences for models with a language modeling head using multinomial sampling.

        Parameters:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            logits_processor (`TFLogitsProcessorList`, *optional*):
                An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
                used to modify the prediction scores of the language modeling head applied at each generation step.
            logits_warper (`TFLogitsProcessorList`, *optional*):
                An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
                used to warp the prediction score distribution of the language modeling head applied before multinomial
                sampling at each generation step.
            max_length (`int`, *optional*, defaults to 20):
                The maximum length of the sequence to be generated.
            pad_token_id (`int`, *optional*):
                The id of the *padding* token.
            eos_token_id (`Union[int, List[int]]`, *optional*):
                The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
            seed (`List[int]`, *optional*):
                Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
                `seed` argument from stateless functions in `tf.random`.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            output_scores (`bool`, *optional*, defaults to `False`):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            model_kwargs:
                Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an
                encoder-decoder model the kwargs should include `encoder_outputs`.

        Return:
            [`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A
            `tf.Tensor` containing the generated tokens (default behaviour) or a
            [`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
            `return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if
            `model.config.is_encoder_decoder=True`.

        Examples:

        ```python
        >>> import tensorflow as tf
        >>> from transformers import (
        ...     AutoTokenizer,
        ...     TFAutoModelForCausalLM,
        ...     TFLogitsProcessorList,
        ...     TFMinLengthLogitsProcessor,
        ...     TFTopKLogitsWarper,
        ...     TFTemperatureLogitsWarper,
        ... )

        >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")

        >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
        >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id

        >>> input_prompt = "Today is a beautiful day, and"
        >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids

        >>> # instantiate logits processors
        >>> logits_processor = TFLogitsProcessorList(
        ...     [
        ...         TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
        ...     ]
        ... )

        >>> # instantiate logits warpers
        >>> logits_warper = TFLogitsProcessorList(
        ...     [
        ...         TFTopKLogitsWarper(50),
        ...         TFTemperatureLogitsWarper(0.7),
        ...     ]
        ... )

        >>> tf.random.set_seed(0)
        >>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)
        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
        ['Today is a beautiful day, and I love my country. But when I look at Donald Trump,']
        ```"""

        # 1. init sample values
        logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
        logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()

        max_length = max_length if max_length is not None else self.generation_config.max_length
        pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]
        output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
        output_attentions = (
            output_attentions if output_attentions is not None else self.generation_config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
        )
        return_dict_in_generate = (
            return_dict_in_generate
            if return_dict_in_generate is not None
            else self.generation_config.return_dict_in_generate
        )

        use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
        use_xla = not tf.executing_eagerly()
        # TODO (Joao): fix cache format or find programmatic way to detect cache index
        # GPT2 and other models have a slightly different cache structure, with a different batch axis
        model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
        cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
        # some models, like XLNet, need more than the last token in the presence of past_key_values
        needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())

        # 2. init `attentions`, `hidden_states`, and `scores` tuples
        scores = [] if (return_dict_in_generate and output_scores) else None
        decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
        cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
        decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None

        # 3. init tensors to use for "xla-compilable" generate function
        batch_size, cur_len = shape_list(input_ids)

        # initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences`
        input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
        generated = tf.concat([input_ids, input_ids_padding], axis=-1)
        finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)

        # 4. define "xla-compilable" stop condition and auto-regressive function
        def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
            return ~tf.reduce_all(finished_sequences)

        def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs):
            if model_kwargs.get("past_key_values") is None or needs_full_input:
                input_ids = generated[:, :cur_len]
            else:
                input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
            model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
            # forward pass to get next token logits
            model_outputs = self(
                **model_inputs,
                return_dict=True,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )
            next_token_logits = model_outputs.logits[:, -1]

            # pre-process distribution
            next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)
            next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len)

            # Store scores, attentions and hidden_states when required
            if not use_xla and return_dict_in_generate:
                if output_scores:
                    scores.append(next_tokens_scores)
                if output_attentions and self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.decoder_attentions)
                    cross_attentions.append(model_outputs.cross_attentions)
                elif output_attentions and not self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.attentions)

                if output_hidden_states and self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.decoder_hidden_states)
                elif output_hidden_states and not self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.hidden_states)

            # sample
            if seed is not None:
                sample_seed = seed
            else:
                sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32)
            next_tokens = tf.squeeze(
                tf.random.stateless_categorical(
                    logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32
                ),
                axis=1,
            )
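
            # Note on the sampling above: tf.random.stateless_categorical draws one token id per row
            # from the warped logits; when no `seed` is supplied, a fresh (2,)-shaped seed is drawn
            # every step so repeated calls do not reuse the same random stream.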

            if eos_token_id is not None:
                if pad_token_id is None:
                    raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
                unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
                next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
                next_token_is_eos = tf.math.reduce_any(
                    tf.equal(
                        tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
                    ),
                    axis=0,
                )
                finished_sequences = finished_sequences | next_token_is_eos

            # update `generated` and `cur_len`
            update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
            generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
            cur_len += 1

            # update model_kwargs
            if use_xla:
                model_kwargs = self._update_model_kwargs_for_xla_generation(
                    model_outputs=model_outputs,
                    model_kwargs=model_kwargs,
                    cur_len=cur_len,
                    max_length=max_length,
                    batch_size=batch_size,
                    is_encoder_decoder=self.config.is_encoder_decoder,
                    batch_axis=cache_batch_axis,
                )
            else:
                model_kwargs = self._update_model_kwargs_for_generation(
                    model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
                )
                # if we don't cache past key values we need the whole input
                if model_kwargs.get("past_key_values", None) is None:
                    # let's throw out `past_key_values` since we don't want `None` tensors
                    model_kwargs.pop("past_key_values", None)

            return generated, finished_sequences, cur_len, model_kwargs

        # 5. run generation
        # 1st generation step has to be run before to initialize `past_key_values`
        generated, finished_sequences, cur_len, model_kwargs = sample_body_fn(
            generated, finished_sequences, cur_len, model_kwargs
        )

        # 2-to-n generation steps can then be run in autoregressive fashion
        # only in case 1st generation step does NOT yield EOS token though
        maximum_iterations = max_length - cur_len
        generated, _, cur_len, _ = tf.while_loop(
            sample_cond_fn,
            sample_body_fn,
            (generated, finished_sequences, cur_len, model_kwargs),
            maximum_iterations=maximum_iterations,
        )

        # 6. prepare outputs
        if not use_xla:
            # cut for backward compatibility
            generated = generated[:, :cur_len]

        if return_dict_in_generate:
            if self.config.is_encoder_decoder:
                # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
                encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
                encoder_hidden_states = (
                    model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
                )

                scores = tuple(scores) if scores is not None else None
                decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
                cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
                decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None

                return TFSampleEncoderDecoderOutput(
                    sequences=generated,
                    scores=scores,
                    encoder_attentions=encoder_attentions,
                    encoder_hidden_states=encoder_hidden_states,
                    decoder_attentions=decoder_attentions,
                    cross_attentions=cross_attentions,
                    decoder_hidden_states=decoder_hidden_states,
                )
            else:
                return TFSampleDecoderOnlyOutput(
                    sequences=generated,
                    scores=scores,
                    attentions=decoder_attentions,
                    hidden_states=decoder_hidden_states,
                )
        else:
            return generated

    @staticmethod
    def _gather_beams(nested, beam_indices, batch_axis=0):
        """Gathers the beam slices indexed by beam_indices into new beam array."""
        def gather_fn(tensor):
            if batch_axis > 0:
                # pushes all dimensions before the batch to the end, so we get (batch, beam_id, ...)
                perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
                tensor = tf.transpose(tensor, perm=perm)

            gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)
            if batch_axis > 0:
                # transposes back to the original dimensions
                perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
                perm = tf.math.invert_permutation(perm)
                gathered_tensor = tf.transpose(gathered_tensor, perm=perm)

            return gathered_tensor

        return tf.nest.map_structure(gather_fn, nested)
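    # Illustrative sketch of `_gather_beams` (hypothetical values): for a tensor of shape
    # (batch=2, beams=3, ...) and beam_indices of shape (2, 3), `tf.gather(..., axis=1,
    # batch_dims=1)` picks tensor[b, beam_indices[b, k], ...] for every batch item b, i.e. it
    # reorders the beam axis independently per batch item:
    #   t = tf.reshape(tf.range(6), (2, 3))          # [[0, 1, 2], [3, 4, 5]]
    #   idx = tf.constant([[2, 2, 0], [1, 0, 0]])
    #   tf.gather(t, idx, axis=1, batch_dims=1)      # [[2, 2, 0], [4, 3, 3]]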
    def beam_search(
        self,
        input_ids: tf.Tensor,
        do_sample: bool = False,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        length_penalty: Optional[float] = None,
        early_stopping: Optional[Union[bool, str]] = None,
        logits_processor: Optional[TFLogitsProcessorList] = None,
        logits_warper: Optional[TFLogitsProcessorList] = None,
        num_return_sequences: Optional[int] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_scores: Optional[bool] = None,
        return_dict_in_generate: Optional[bool] = None,
        **model_kwargs,
    ) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
        r"""
        Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses
        a greedy approach, otherwise does multinomial sampling without replacement.
        Parameters:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            do_sample (`bool`, *optional*, defaults to `False`):
                Whether or not to use sampling; use greedy decoding otherwise.
            max_length (`int`, *optional*, defaults to 20):
                The maximum length of the sequence to be generated.
            pad_token_id (`int`, *optional*):
                The id of the *padding* token.
            eos_token_id (`Union[int, List[int]]`, *optional*):
                The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence*
                tokens.
            length_penalty (`float`, *optional*, defaults to 1.0):
                Exponential penalty to the length that is used with beam-based generation. It is applied as an
                exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the
                score is
                the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences,
                while `length_penalty` < 0.0 encourages shorter sequences.
            early_stopping (`bool` or `str`, *optional*, defaults to `False`):
                Controls the stopping condition for beam-based methods, like beam search. It accepts the following
                values: `True`, where the generation stops as soon as there are `num_beams` complete candidates;
                `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better
                candidates; `"never"`, where the beam search procedure only stops when there cannot be better
                candidates (canonical beam search algorithm).
            logits_processor (`[TFLogitsProcessorList]`, *optional*):
                An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
                used to modify the prediction scores of the language modeling head applied at each generation step.
            logits_warper (`TFLogitsProcessorList`, *optional*):
                An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
                used to warp the prediction score distribution of the language modeling head applied before
                multinomial sampling at each generation step.
            num_return_sequences (`int`, *optional*, defaults to 1):
                The number of independently computed returned sequences for each element in the batch.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
            model_kwargs:
                Additional model-specific kwargs will be forwarded to the `call` function of the model. If the model
                is an encoder-decoder model, the kwargs should include `encoder_outputs`.
        Return:
            [`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or
            `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
            [`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
            `return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if
            `model.config.is_encoder_decoder=True`.

        Examples:

        ```python
        >>> from transformers import (
        ...     AutoTokenizer,
        ...     TFAutoModelForSeq2SeqLM,
        ...     TFLogitsProcessorList,
        ...     TFMinLengthLogitsProcessor,
        ... )
        >>> import tensorflow as tf

        >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
        >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
        >>> encoder_input_str = "translate English to German: How old are you?"
        >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids

        >>> # let's run beam search using 3 beams
        >>> num_beams = 3
        >>> # define decoder start token ids
        >>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32)
        >>> input_ids = input_ids * model.generation_config.decoder_start_token_id

        >>> # add encoder_outputs to model keyword arguments
        >>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True)
        >>> encoder_outputs.last_hidden_state = tf.repeat(
        ...     tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1
        ... )
        >>> model_kwargs = {"encoder_outputs": encoder_outputs}
        >>> # instantiate logits processors
        >>> logits_processor = TFLogitsProcessorList(
        ...     [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)]
        ... )

        >>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs)
        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
        ['Wie alt bist du?']
        ```"""

        def flatten_beam_dim(tensor, batch_axis=0):
            """Flattens the first two dimensions of a non-scalar array."""
            shape = shape_list(tensor)
            return tf.reshape(
                tensor,
                shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :],
            )
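        # Illustrative note (hypothetical shapes): `flatten_beam_dim` maps (batch=2, beams=3, len)
        # -> (6, len) so the model sees a plain batch, and `unflatten_beam_dim` (defined below)
        # inverts it, mapping (6, len) -> (2, 3, len). With batch_axis=1 (e.g. the TFGPT2 cache
        # layout) the same reshape is applied starting at the second axis instead.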
        def unflatten_beam_dim(tensor, num_beams, batch_axis=0):
            """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
            shape = shape_list(tensor)
            return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :])

        # 1. init beam_search values
        logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
        logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
        max_length = max_length if max_length is not None else self.generation_config.max_length
        pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]
        num_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
        )
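        # Note: `eos_token_id` is normalized to a list above so that downstream checks can treat
        # single and multiple end-of-sequence tokens uniformly.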
        output_attentions = (
            output_attentions if output_attentions is not None else self.generation_config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
        )
        output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
        return_dict_in_generate = (
            return_dict_in_generate
            if return_dict_in_generate is not None
            else self.generation_config.return_dict_in_generate
        )

        length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
        early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
        use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
        use_xla = not tf.executing_eagerly()
        # TODO (Joao): fix cache format or find programmatic way to detect cache index
        # GPT2 and other models have a slightly different cache structure, with a different batch axis
        model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
        cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
        # some models, like XLNet, need more than the last token in the presence of past_key_values
        needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
        # 2. init `attentions`, `hidden_states`, and `scores` tuples
        all_scores = [] if (return_dict_in_generate and output_scores) else None
        decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
        cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
        decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None

        # 3. init tensors to use for "xla-compileable" generate function
        batch_size, num_beams, cur_len = shape_list(input_ids)
        # store the prompt length of decoder
        decoder_prompt_len = cur_len
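        # Note: unlike `sample`, `beam_search` expects `input_ids` already expanded to a
        # (batch_size, num_beams, sequence_length) layout -- see the broadcast with
        # `expand_in_new_axis=True` performed in `generate`.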
        # per batch, beam-item holding current token in loop, pre-populated with `pad_token_id`
        input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * (
            pad_token_id or 0
        )
        running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1)
        sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0)

        # per batch, beam-item state bit indicating if sentence has finished.
        is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool)

        # per batch, beam-item score, logprobs
        running_scores = tf.tile(
            tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1]
        )
        scores = tf.ones((batch_size, num_beams)) * -1.0e9
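        # Illustrative note: `running_scores` starts as [0.0, -1e9, -1e9, ...] per batch item so
        # that only the first beam is "alive" at step 0; all beams hold the identical prompt, and
        # without this masking the top-k over beams * vocab would pick the same token num_beams
        # times.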
        # per batch beam indices
        running_beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1
        beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1

        # flatten beam dim
        if "encoder_outputs" in model_kwargs:
            model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
                model_kwargs["encoder_outputs"]["last_hidden_state"]
            )
        if "attention_mask" in model_kwargs:
            model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"])
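        # Note: the encoder outputs and attention mask were expanded to (batch, beams, ...)
        # upstream, but the decoder runs on a flat (batch * beams) axis, so they are reshaped
        # here to match that flattened layout.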
        # 4. define "xla-compile-able" stop-condition and auto-regressive function
        def beam_search_cond_fn(
            cur_len,
            running_sequences,
            running_scores,
            running_beam_indices,
            sequences,
            scores,
            beam_indices,
            is_sent_finished,
            decoder_prompt_len,
            model_kwargs,
        ):
            """
            Beam Search termination condition function -- halts the generation loop if any of these conditions
            becomes False
            """
            # 1. is less than max length?
            not_max_length_yet = cur_len < max_length
            # 2. can the new beams still improve?
            # early_stopping == False -> apply heuristic = always get the best score from
            #   `cur_len - decoder_prompt_len`. See the discussion below for more details.
            #   https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
            # early_stopping == "never" -> compute the best score from `max_length` or `cur_len`, depending on the
            #   sign of `length_penalty`. Positive `length_penalty` favors longer sequences, thus we use
            #   `max_length` there.
            if early_stopping == "never" and length_penalty > 0.0:
                best_running_score = running_scores[:, :1] / ((max_length - decoder_prompt_len) ** length_penalty)
            else:
                best_running_score = running_scores[:, :1] / (
                    tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty
                )

            worst_finished_score = tf.where(
                is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9
            )
            improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score)
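            # Illustrative note (hypothetical numbers): with length_penalty=1.0, a running beam
            # with total log-prob -3.0 after 3 generated tokens has a normalized score of -1.0;
            # if the worst finished beam's normalized score is -2.0, then -1.0 > -2.0 and the
            # loop keeps running, since a running beam may still beat every finished one.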
            # 3. is there still a beam that has not finished?
            still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True))

            return not_max_length_yet & still_open_beam & improvement_still_possible
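        # Note: the three boolean conditions above are combined with logical AND, so the
        # `tf.while_loop` driving generation halts as soon as any one of them fails.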
        def beam_search_body_fn(
            cur_len,
            running_sequences,
            running_scores,
            running_beam_indices,
            sequences,
            scores,
            beam_indices,
            is_sent_finished,
            decoder_prompt_len,
            model_kwargs,
        ):
            """
            Beam Search iterative update function -- each iteration adds a new token and updates the best sequences
            seen so far
            """
            # 1. Forward current tokens
            if model_kwargs.get("past_key_values") is None or needs_full_input:
                input_ids = running_sequences[:, :, :cur_len]
            else:
                input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1)
            model_inputs = self.prepare_inputs_for_generation(
                flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs
            )
            model_outputs = self(
                **model_inputs,