Columns: text (string, length 1 to 1.02k) · class_index (int64, 0 to 10.8k) · source (string, length 85 to 188)
return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 2. Compute log probs # get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and # add new logprobs to existing running logprobs scores. log_probs = tf.nn.log_softmax(logits) log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) log_probs = unflatten_beam_dim(log_probs, num_beams) if do_sample: log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) log_probs = unflatten_beam_dim(log_probs, num_beams) log_probs_processed = log_probs log_probs = log_probs + tf.expand_dims(running_scores, axis=2) vocab_size = log_probs.shape[2] log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size))
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: all_scores.append( logits_warper( flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs_processed), cur_len, ) ) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(model_outputs.cross_attentions)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.decoder_hidden_states) elif output_hidden_states and not self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.hidden_states)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 3. Retrieve top-K # Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k # candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the # best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live # beam search. # Gather the top 2*K scores from _all_ beams. # Gather 2*k top beams. # Recover the beam index by floor division. # Recover token id by modulo division and expand Id array for broadcasting. # Update sequences for the 2*K top-k new sequences. beams_to_keep = 2 * num_beams if do_sample: topk_indices = sample_without_replacement(log_probs, beams_to_keep) topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1) else:
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep) topk_current_beam_indices = topk_indices // vocab_size topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices) topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices) topk_ids = topk_indices % vocab_size
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
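The floor-division / modulo recovery in the snippet above is easier to see on a toy example. The following standalone sketch (NumPy, illustration only, not part of the library) shows how a flat top-k index over the merged `num_beams * vocab_size` axis is split back into the beam being extended and the token id that extends it:

```python
import numpy as np

# Toy setup: 1 batch item, 2 beams, vocab of 5, scores already flattened to
# shape (batch_size, num_beams * vocab_size) as in the beam search body above.
batch_size, num_beams, vocab_size = 1, 2, 5
log_probs = np.log(np.array([[0.02, 0.1, 0.3, 0.05, 0.03,      # beam 0
                              0.2, 0.05, 0.15, 0.05, 0.05]]))  # beam 1

beams_to_keep = 2 * num_beams
topk_indices = np.argsort(log_probs, axis=1)[:, ::-1][:, :beams_to_keep]  # [[2 5 7 1]]

beam_index = topk_indices // vocab_size  # [[0 1 1 0]] -> which beam each candidate extends
token_id = topk_indices % vocab_size     # [[2 0 2 1]] -> which token it appends
```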
# writes the new token indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep]) indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size]) update_indices = tf.stack( [indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1 ) topk_sequences = tf.tensor_scatter_nd_update( tensor=topk_running_sequences, indices=update_indices, updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]), )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# we want to store the beam indices with batch information -> real beam index = beam index % num beams batch_modified_indices = topk_current_beam_indices + tf.broadcast_to( tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape ) update_indices = tf.stack( [ indices_batch, indices_beam, tf.broadcast_to(cur_len - decoder_prompt_len, [batch_size * beams_to_keep]), ], axis=-1, ) topk_beam_indices = tf.tensor_scatter_nd_update( tensor=topk_running_beam_indices, indices=update_indices, updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]), )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 4. Check which sequences have ended # Update current sequences: Did the top `num_beams` sequences reach an end marker? # To prevent these just finished sequences from being added to the current sequences # set of active beam search sequences, set their log probs to a very large negative value. if eos_token_id is None: eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool) else: eos_in_next_token = tf.math.reduce_any( tf.equal( tf.broadcast_to( topk_sequences[:, :, cur_len], [len(eos_token_id)] + topk_sequences[:, :, cur_len].shape, ), tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1), ), axis=0, ) did_topk_just_finished = eos_in_next_token & tf.broadcast_to(
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0), shape_list(eos_in_next_token), )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# non-top `num_beams` eos tokens can't be used to finish a beam, but the others can't be used in the next # running sentences either running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9 # 5. Get running sequences scores for next # Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams # (from top 2*k beams). next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1] next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams( [topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 6. Process topk logits # Further process log probs: # - add length penalty # - make sure no scores can be added anymore if beam is full # - make sure still running sequences cannot be chosen as finalized beam topk_log_probs = topk_log_probs / ( tf.cast(cur_len + 1 - decoder_prompt_len, dtype=tf.float32) ** length_penalty ) beams_in_batch_are_full = tf.broadcast_to( tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished) ) & (early_stopping is True) add_penalty = ~did_topk_just_finished | beams_in_batch_are_full topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
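As a quick illustration of the length-penalty division above (a sketch, not library code): each candidate's accumulated log-probability is divided by the generated length raised to `length_penalty`, so values above 1.0 favour longer sequences and values below 1.0 favour shorter ones.

```python
# Illustration only: length-penalised beam score, mirroring the division above.
def length_penalised_score(sum_log_probs: float, generated_length: int, length_penalty: float) -> float:
    return sum_log_probs / (generated_length ** length_penalty)

# Two hypothetical finished beams: a short one and a longer one with a lower raw score.
print(length_penalised_score(-4.0, 5, length_penalty=1.0))   # -0.8
print(length_penalised_score(-6.0, 10, length_penalty=1.0))  # -0.6  -> the longer beam ranks higher
print(length_penalised_score(-6.0, 10, length_penalty=0.5))  # ~-1.90 -> now the shorter beam (~-1.79) wins
```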
# 7. Get scores, sequences, is sentence finished for next. # Combine sequences, scores, and flags along the beam dimension and compare new finished sequence scores # to existing finished scores and select the best from the new set of beams merged_sequences = tf.concat([sequences, topk_sequences], axis=1) merged_scores = tf.concat([scores, topk_log_probs], axis=1) merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1) merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1) topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1] next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams( [merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 8. Prepare data for the next iteration # Determine the top k beam indices from the original set of all beams. With these, gather the top k # beam-associated caches. cur_len = cur_len + 1 if "past_key_values" in model_outputs: cache = tf.nest.map_structure( lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis), model_outputs.past_key_values, ) next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices) next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis) model_outputs["past_key_values"] = tf.nest.map_structure( lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
if use_xla: next_model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=model_outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=(batch_size * num_beams), is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: next_model_kwargs = self._update_model_kwargs_for_generation( model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if we don't cache past_key_values key values we need the whole input if model_kwargs.get("past_key_values", None) is None: # let's throw out `past_key_values` since we don't want `None` tensors model_kwargs.pop("past_key_values", None)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
return ( cur_len, next_running_sequences, next_running_scores, next_running_beam_indices, next_sequences, next_scores, next_beam_indices, next_is_sent_finished, decoder_prompt_len, next_model_kwargs, )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 5. run generation # 1st generation step has to be run before to initialize `past_key_values` (if active) ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, ) = beam_search_body_fn( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 2-to-n generation steps can then be run in autoregressive fashion (only in case 1st generation step does # NOT yield EOS token though) maximum_iterations = max_length - cur_len ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, _, ) = tf.while_loop( beam_search_cond_fn, beam_search_body_fn, ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, ), maximum_iterations=maximum_iterations, )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 6. prepare outputs # Account for the edge-case where there are no finished sequences for a particular batch item. If so, return # running sequences for that batch item. none_finished = tf.math.reduce_any(is_sent_finished, axis=1) sequences = tf.where(none_finished[:, None, None], sequences, running_sequences) beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices) # Apply the length penalty so that running scores match the finalized scores if they are used running_scores = running_scores / (tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty) scores = tf.where(none_finished[:, None], scores, running_scores)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# Take best beams for each batch (the score is sorted in descending order) sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) scores = flatten_beam_dim(scores[:, :num_return_sequences]) beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :]) if not use_xla: # Cut for backward compatibility sequences = sequences[:, :cur_len] beam_indices = beam_indices[:, : cur_len - decoder_prompt_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput return output_cls( sequences=sequences, sequences_scores=scores, scores=all_scores, beam_indices=beam_indices, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput return output_cls( sequences=sequences, sequences_scores=scores, scores=all_scores, beam_indices=beam_indices,
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequences
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
def contrastive_search( self, input_ids: tf.Tensor, top_k: Optional[int] = 1, penalty_alpha: Optional[float] = 0, logits_processor: Optional[TFLogitsProcessorList] = None, logits_warper: Optional[TFLogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFContrastiveSearchOutput, tf.Tensor]: r""" Generates sequences of token ids for models with a language modeling head using **contrastive search** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. top_k (`int`, *optional*, defaults to 1): The size of the candidate set that is used to re-rank for contrastive search penalty_alpha (`float`, *optional*, defaults to 0): The degeneration penalty for contrastive search; activate when it is larger than 0 logits_processor (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model-specific keyword arguments will be forwarded to the `call` function of the model. If the model is an encoder-decoder model, the kwargs should include `encoder_outputs`. Return: [`~generation.TFContrastiveSearchDecoderOnlyOutput`], [`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFContrastiveSearchDecoderOnlyOutput`] if
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
`model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "DeepMind Company is" >>> input_ids = tokenizer(input_prompt, return_tensors="tf") >>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] ```"""
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0): """Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors.""" def gather_fn(tensor): gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis) return gathered_tensor return tf.nest.map_structure(gather_fn, nested)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 1. init greedy_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = (
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) use_cache = True # In contrastive search, we always use cache model_kwargs.pop("use_cache", None)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programmatic way to detect cache index # GPT2 and other models have a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # 2. init `attentions`, `hidden_states`, and `scores` tuples scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, cur_len = shape_list(input_ids)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) generated = tf.concat([input_ids, input_ids_padding], axis=-1) finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) # 4. define "xla-compile-able" stop-condition and auto-regressive function # define condition fn def contrastive_search_cond_fn( generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables ): """state termination condition fn.""" return ~tf.reduce_all(finished_sequences) # define condition fn def contrastive_search_body_fn( generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables ): """state update fn."""
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# if the first step in the loop, encode all the prefix and obtain: (1) past_key_values; # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step if model_kwargs.get("past_key_values") is None: # prepare inputs model_inputs = self.prepare_inputs_for_generation( generated[:, :cur_len], use_cache=use_cache, **model_kwargs ) # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save # the `encoder_outputs` outputs = self( **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with # previous tokens) if self.config.is_encoder_decoder: last_hidden_states = outputs.decoder_hidden_states[-1] else: last_hidden_states = outputs.hidden_states[-1] # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across # iterations (with fixed shapes) if use_xla: last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]]) # next logit for contrastive search to select top-k candidate tokens logit_for_next_step = outputs.logits[:, -1, :]
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
if use_xla: model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=batch_size, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # Expands model inputs top_k times, for batched forward passes (akin to beam search). _, model_kwargs = self._expand_inputs_for_generation( expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
past_key_values = model_kwargs.get("past_key_values") if past_key_values is None: raise ValueError( f"{self.__class__.__name__} does not support caching and therefore **can't** be used " "for contrastive search." ) elif ( not isinstance(past_key_values[0], (tuple, tf.Tensor)) or past_key_values[0][0].shape[0] != batch_size ): raise ValueError( f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be " "used for contrastive search without further modifications." ) else: logit_for_next_step = next_step_cached_variables["logit_for_next_step"] last_hidden_states = next_step_cached_variables["last_hidden_states"]
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
outputs = next_step_cached_variables["outputs"]
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# contrastive_search main logic start: # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by # degeneration penalty logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len) logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len) next_probs = stable_softmax(logit_for_next_step, axis=-1) top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: scores.append(logit_for_next_step) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(outputs.decoder_hidden_states) elif output_hidden_states and not self.config.is_encoder_decoder: decoder_hidden_states.append(outputs.hidden_states)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# Replicates the new past_key_values to match the `top_k` candidates model_kwargs["past_key_values"] = tf.nest.map_structure( lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"] ) # compute the candidate tokens by the language model and collects their hidden_states next_model_inputs = self.prepare_inputs_for_generation( tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs ) outputs = self( **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) next_past_key_values = self._extract_past_from_model_output(outputs)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
logits = outputs.logits[:, -1, :] # name is different for encoder-decoder and decoder-only models if self.config.is_encoder_decoder: next_hidden = outputs.decoder_hidden_states[-1] full_hidden_states = outputs.decoder_hidden_states else: next_hidden = outputs.hidden_states[-1] full_hidden_states = outputs.hidden_states context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0) # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the # model confidence selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# converts indices to a dimension of top_k to the stacked top_k * batch_size dimension, for indexing # without a need to reshape on tensors that have these two dimensions stacked selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores # (model confidence minus degeneration penalty); (6) decoder hidden_states next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1) next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
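`_ranking_fast` is not shown in this excerpt; the sketch below (NumPy, illustration only, simplified to a single batch item) gives the contrastive-search ranking rule it is understood to implement: each candidate's degeneration penalty is its maximum cosine similarity to the context hidden states, and the final score trades that off against the model confidence via `penalty_alpha`.

```python
import numpy as np

def rank_candidates(context_hidden, next_hidden, top_k_probs, penalty_alpha):
    """Pick the best of k candidates for one batch item (illustrative sketch).

    context_hidden: [k, seq_len, dim] context hidden states, repeated per candidate
    next_hidden:    [k, 1, dim] hidden state of each candidate token
    top_k_probs:    [k] model confidence of each candidate
    """
    ctx = context_hidden / np.linalg.norm(context_hidden, axis=-1, keepdims=True)
    nxt = next_hidden / np.linalg.norm(next_hidden, axis=-1, keepdims=True)
    cosine = np.matmul(ctx, np.swapaxes(nxt, 1, 2)).squeeze(-1)  # [k, seq_len]
    degeneration_penalty = cosine.max(axis=-1)                   # [k]
    scores = (1.0 - penalty_alpha) * top_k_probs - penalty_alpha * degeneration_penalty
    return int(np.argmax(scores))
```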
# XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across # iterations (with fixed shapes) if use_xla: last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0]) else: last_hidden_states = tf.concat([last_hidden_states, next_hidden], axis=1) next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked) next_past_key_values = gather_best_candidate( next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis ) logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked)
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration if self.config.is_encoder_decoder: next_step_cross_attentions = () next_step_decoder_attentions = () if output_attentions: next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked) next_step_decoder_attentions = gather_best_candidate( outputs.decoder_attentions, selected_idx_stacked ) outputs = TFSeq2SeqLMOutput( past_key_values=next_past_key_values, decoder_hidden_states=next_decoder_hidden_states, decoder_attentions=next_step_decoder_attentions or None, cross_attentions=next_step_cross_attentions or None, ) else: next_step_attentions = ()
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
if output_attentions: next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked) outputs = TFCausalLMOutputWithPast( past_key_values=next_past_key_values, hidden_states=next_decoder_hidden_states, attentions=next_step_attentions or None, ) # contrastive_search main logic end
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) next_token_is_eos = tf.math.reduce_any( tf.equal( tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) ), axis=0, ) finished_sequences = finished_sequences | next_token_is_eos # update `generated` and `cur_len` update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) cur_len += 1
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
if use_xla: # NOTE: 1) relative to other generation strategies, contrastive search is always running forward # passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from # [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k` model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=outputs, model_kwargs=model_kwargs, cur_len=cur_len + 1, max_length=max_length, batch_size=batch_size * top_k, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
next_step_cached_variables = { "logit_for_next_step": logit_for_next_step, "last_hidden_states": last_hidden_states, "outputs": outputs, } return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables # 5. run generation # 1st generation step has to be run before to initialize `past_key_values` generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn( generated, finished_sequences, cur_len, model_kwargs, None )
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
# 2-to-n generation steps can then be run in autoregressive fashion # only in case 1st generation step does NOT yield EOS token though maximum_iterations = max_length - cur_len generated, _, cur_len, _, _ = tf.while_loop( contrastive_search_cond_fn, contrastive_search_body_fn, (generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables), maximum_iterations=maximum_iterations, ) # 6. prepare outputs if not use_xla: # cut for backward compatibility generated = generated[:, :cur_len]
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights # and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) scores = tuple(scores) if scores is not None else None decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
return TFContrastiveSearchEncoderDecoderOutput( sequences=generated, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFContrastiveSearchDecoderOnlyOutput( sequences=generated, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return generated
10,755
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_utils.py
class FlaxLogitsProcessor: """Abstract base class for all logit processors that can be applied during generation.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: """Flax method for processing logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
10,756
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxLogitsWarper: """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: """Flax method for warping logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
10,757
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxLogitsProcessorList(list): """ This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs. """
10,758
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray: for processor in self: function_args = inspect.signature(processor.__call__).parameters if len(function_args) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys())} for " f"{processor.__class__} are passed to the logits processor." ) scores = processor(input_ids, scores, cur_len, **kwargs) else: scores = processor(input_ids, scores, cur_len) return scores
10,758
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
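A minimal usage sketch of the list container above (illustration only; the token ids, vocabulary size, and the exact mix of processors are made up): processors and warpers are simply collected in a `FlaxLogitsProcessorList` and applied to the score tensor at each generation step.

```python
import jax.numpy as jnp

processors = FlaxLogitsProcessorList(
    [
        FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2),
        FlaxTemperatureLogitsWarper(temperature=0.7),
        FlaxTopKLogitsWarper(top_k=50),
    ]
)

input_ids = jnp.zeros((1, 3), dtype=jnp.int32)  # dummy prompt of length 3
scores = jnp.zeros((1, 32000))                  # dummy logits over a 32k vocabulary
scores = processors(input_ids, scores, cur_len=3)
```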
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper): r""" [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution). Args: temperature (`float`): The value used to module the logits distribution. """ def __init__(self, temperature: float): if not isinstance(temperature, float) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") self.temperature = temperature def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: scores = scores / self.temperature return scores
10,759
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
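A small numeric illustration (not library code) of what the temperature division does to the resulting distribution:

```python
import jax
import jax.numpy as jnp

logits = jnp.array([[2.0, 1.0, 0.0]])
for temperature in (1.0, 0.7, 2.0):
    probs = jax.nn.softmax(logits / temperature, axis=-1)
    print(temperature, probs)
# temperature < 1.0 sharpens the distribution (closer to greedy),
# temperature > 1.0 flattens it (more diverse sampling).
```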
class FlaxTopPLogitsWarper(FlaxLogitsWarper): """ [`FlaxLogitsWarper`] that performs top-p, i.e. restricting to the top tokens whose cumulative probability is <= `top_p`. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to -inf): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """
10,760
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1]) mask_scores = jnp.full_like(scores, self.filter_value) cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1) score_mask = cumulative_probs < self.top_p
10,760
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
# include the token that is higher than top_p as well score_mask = jnp.roll(score_mask, 1) score_mask |= score_mask.at[:, 0].set(True) # min tokens to keep score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True) topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores) next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1] return next_scores
10,760
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
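A worked example of the masking logic above (illustration only, probabilities chosen for readability):

```python
import jax
import jax.numpy as jnp
from jax import lax

scores = jnp.log(jnp.array([[0.5, 0.3, 0.1, 0.1]]))
top_p = 0.7

sorted_scores, _ = lax.top_k(scores, scores.shape[-1])
cumulative_probs = jax.nn.softmax(sorted_scores, axis=-1).cumsum(axis=-1)
# cumulative_probs == [[0.5, 0.8, 0.9, 1.0]]
# `cumulative_probs < top_p` keeps only the first token; rolling the mask by one
# position and forcing index 0 also keeps the token that crosses the threshold,
# so the tokens with probabilities 0.5 and 0.3 survive and the rest become -inf.
```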
class FlaxTopKLogitsWarper(FlaxLogitsWarper): r""" [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to -inf): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = max(top_k, min_tokens_to_keep) self.filter_value = filter_value
10,761
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: batch_size, vocab_size = scores.shape next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value) topk = min(self.top_k, scores.shape[-1]) # Safety check topk_scores, topk_indices = lax.top_k(scores, topk) shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten() topk_scores_flat = topk_scores.flatten() topk_indices_flat = topk_indices.flatten() + shift next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat) next_scores = next_scores_flat.reshape(batch_size, vocab_size) return next_scores
10,761
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token. Args: bos_token_id (`int`): The id of the token to force as the first generated token. """ def __init__(self, bos_token_id: int): self.bos_token_id = bos_token_id def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: new_scores = jnp.full(scores.shape, -float("inf")) apply_penalty = 1 - jnp.bool_(cur_len - 1) scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores) return scores
10,762
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. Args: max_length (`int`): The maximum length of the sequence to be generated. eos_token_id (`int`): The id of the token to force as the last generated token when `max_length` is reached. """ def __init__(self, max_length: int, eos_token_id: int): self.max_length = max_length self.eos_token_id = eos_token_id def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: new_scores = jnp.full(scores.shape, -float("inf")) apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1) scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores) return scores
10,763
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Args: min_length (`int`): The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. eos_token_id (`int`): The id of the *end-of-sequence* token. """ def __init__(self, min_length: int, eos_token_id: int): if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a non-negative integer, but is {min_length}") if not isinstance(eos_token_id, int) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a non-negative integer, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id
10,764
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: # create boolean flag to decide if min length penalty should be applied apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1) scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores) return scores
10,764
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
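The `apply_penalty` flag above is just a 0/1 switch computed without Python control flow, which keeps the processor traceable under `jit`. A small trace (illustration only):

```python
import jax.numpy as jnp

min_length = 5
for cur_len in (3, 4, 5, 6):
    apply_penalty = 1 - jnp.clip(cur_len - min_length, 0, 1)
    print(cur_len, int(apply_penalty))  # -> 1, 1, 1, 0: EOS stays masked until cur_len exceeds min_length
```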
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the beginning of the generation. Args: begin_suppress_tokens (`List[int]`): Tokens to not sample. begin_index (`int`): Index where the tokens are suppressed. """ def __init__(self, begin_suppress_tokens, begin_index): self.begin_suppress_tokens = list(begin_suppress_tokens) self.begin_index = begin_index def __call__(self, input_ids, scores, cur_len: int): apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index) scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores) return scores
10,765
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs to be `-inf` so they are not sampled. Args: suppress_tokens (`list`): Tokens to not sample. """ def __init__(self, suppress_tokens: list): self.suppress_tokens = list(suppress_tokens) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: scores = scores.at[..., self.suppress_tokens].set(-float("inf")) return scores
10,766
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens to `-inf` so that they are sampled at their corresponding index. Args: force_token_map (`list`): Map giving token ids and indices where they will be forced to be sampled. """
10,767
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def __init__(self, force_token_map): force_token_map = dict(force_token_map) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1 for index, token in force_token_map.items(): if token is not None: force_token_array = force_token_array.at[index].set(token) self.force_token_array = jnp.int32(force_token_array) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: def _force_token(generation_idx): batch_size = scores.shape[0] current_token = self.force_token_array[generation_idx]
10,767
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf") updates = jnp.zeros((batch_size, 1), dtype=scores.dtype) new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token)) return new_scores scores = lax.cond( cur_len >= self.force_token_array.shape[0], # If the current length is geq than the length of force_token_array, the processor does nothing. lambda: scores, # Otherwise, it may force a certain token. lambda: lax.cond( self.force_token_array[cur_len] >= 0, # Only valid (positive) tokens are forced lambda: _force_token(cur_len), # Otherwise, the processor does nothing. lambda: scores, ), ) return scores
10,767
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
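A usage sketch for the processor above (illustration only; the token ids and vocabulary size are made up, not real Whisper ids): fixed tokens are forced at the first generation steps, as is done for language and task tokens.

```python
import jax.numpy as jnp

force_token_map = [(1, 7), (2, 9)]  # force token 7 at step 1 and token 9 at step 2
processor = FlaxForceTokensLogitsProcessor(force_token_map)

scores = jnp.zeros((2, 16))                     # batch of 2, vocabulary of 16
input_ids = jnp.zeros((2, 1), dtype=jnp.int32)
forced = processor(input_ids, scores, cur_len=1)
# `forced` is -inf everywhere except column 7 (set to 0), so token 7 is guaranteed
# to be selected at this step; at indices without an entry the scores pass through.
```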
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor): r""" Whisper-specific [`FlaxLogitsProcessor`] that enforces the rules for timestamp tokens during generation: it suppresses the `"<|notimestamps|>"` token, keeps timestamps in the expected pattern, caps the initial timestamp, and forces a timestamp when the total probability mass on timestamp tokens exceeds that of any single text token. Args: generate_config (`GenerateConfig`): The generate config used to generate the output. The following parameters are required: eos_token_id (`int`, *optional*, defaults to 50257): The id of the *end-of-sequence* token. no_timestamps_token_id (`int`, *optional*, defaults to 50363): The id of the `"<|notimestamps|>"` token. max_initial_timestamp_index (`int`, *optional*, defaults to 1): Used to set the maximum value of the initial timestamp. This is used to prevent the model from predicting timestamps that are too far in the future. """
10,768
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def __init__(self, generate_config, model_config, decoder_input_length): self.eos_token_id = generate_config.eos_token_id self.no_timestamps_token_id = generate_config.no_timestamps_token_id self.timestamp_begin = generate_config.no_timestamps_token_id + 1 self.begin_index = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(generate_config, "max_initial_timestamp_index"): self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index else: self.max_initial_timestamp_index = model_config.vocab_size if self.max_initial_timestamp_index is None: self.max_initial_timestamp_index = model_config.vocab_size
10,768
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def __call__(self, input_ids, scores, cur_len): # suppress <|notimestamps|> which is handled by without_timestamps scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf")) def handle_pairs(input_ids_k, scores_k): last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False) last_was_timestamp = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, ) penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False) penultimate_was_timestamp = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, )
10,768
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
return jnp.where( last_was_timestamp, jnp.where( penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf")), scores_k.at[: self.eos_token_id].set(-float("inf")), ), scores_k, ) scores = jax.vmap(handle_pairs)(input_ids, scores) apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False) apply_max_initial_timestamp = jnp.where( self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, ) last_allowed = self.timestamp_begin + self.max_initial_timestamp_index scores = jnp.where( apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores, )
10,768
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
# if sum of probability over timestamps is above any other token, sample timestamp logprobs = jax.nn.log_softmax(scores, axis=-1) def handle_cumulative_probs(logprobs_k, scores_k): timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1) max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin]) return jnp.where( timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf")), scores_k, ) scores = jax.vmap(handle_cumulative_probs)(logprobs, scores) return scores
10,768
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class FlaxNoRepeatNGramLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] that enforces no repetition of n-grams. See [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). Args: ngram_size (`int`): All ngrams of size `ngram_size` can only occur once. """ def __init__(self, ngram_size: int): if not isinstance(ngram_size, int) or ngram_size <= 0: raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") self.ngram_size = ngram_size
10,769
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def get_previous_ngrams(self, input_ids: jnp.ndarray, vocab_size: int, cur_len: int): """ get a matrix of size (batch_size,) + (vocab_size,)*n (for n-grams) that represent the n-grams that occurred previously. The BCOO representation allow to store only the few non-zero entries, instead of the full (huge) matrix """ batch_size, seq_len = input_ids.shape # number of n-grams in the whole sequence seq_ngrams = seq_len - (self.ngram_size - 1) # number of n-grams in the currently generated sequence cur_ngrams = cur_len - (self.ngram_size - 1) def body_fun(i, val): b = i % batch_size pos = i // batch_size return val.at[i].set( jnp.array( [ b, ] + [jnp.array(input_ids)[b, pos + j] for j in range(self.ngram_size)] ) )
10,769
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
shape = (batch_size * seq_ngrams, self.ngram_size + 1) all_update_indices = jax.lax.fori_loop( 0, batch_size * cur_ngrams, body_fun, jnp.zeros(shape, dtype=input_ids.dtype) ) # ignore the n-grams not yet generated data = (jnp.arange(batch_size * seq_ngrams) < batch_size * cur_ngrams).astype("float32") return sparse.BCOO((data, all_update_indices), shape=(batch_size,) + (vocab_size,) * self.ngram_size) def get_banned_tokens_mask(self, latest_tokens: jnp.ndarray, previous_ngrams) -> jnp.ndarray: """ Determines which tokens must be banned given latest tokens and the previously seen ngrams. """ @sparse.sparsify @jax.vmap def inner_fn(latest_tokens, previous_ngrams): return previous_ngrams[tuple(latest_tokens)] return sparse.bcoo_todense(inner_fn(latest_tokens, previous_ngrams))
10,769
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: def true_fn(): _, vocab_size = scores.shape # store the previously seen n-grams previous_ngrams = self.get_previous_ngrams(input_ids, vocab_size, cur_len) # get the n-1 last tokens that prefix the n-gram being generated latest_tokens = jnp.zeros((input_ids.shape[0], self.ngram_size - 1), dtype=input_ids.dtype) latest_tokens = jax.lax.dynamic_update_slice( latest_tokens, jax.lax.dynamic_slice( input_ids, (0, cur_len - (self.ngram_size - 1)), (input_ids.shape[0], (self.ngram_size - 1)) ), (0, 0), )
10,769
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
# compute the banned tokens, ie all the tokens that when added to the latest tokens lead to a n-gram that was previously generated banned_tokens_indices_mask = self.get_banned_tokens_mask(latest_tokens, previous_ngrams).astype("bool") return jnp.where(banned_tokens_indices_mask, -float("inf"), scores) output = jax.lax.cond((cur_len >= self.ngram_size - 1), true_fn, lambda: scores) return output
10,769
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/flax_logits_process.py
class Constraint(ABC): r"""Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied. All classes that inherit Constraint must follow the requirement that ```py completed = False while not completed: _, completed = constraint.update(constraint.advance()) ``` will always terminate (halt). """ def __init__(self): # test for the above condition self.test() def test(self): """ Tests whether this constraint has been properly defined. """ counter = 0 completed = False while not completed: if counter == 1: self.reset() advance = self.advance() if not self.does_advance(advance): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
10,770
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
stepped, completed, reset = self.update(advance) counter += 1 if counter > 10000: raise Exception("update() does not fulfill the constraint.") if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly.") @abstractmethod def advance(self): """ When called, returns the token(s) that would take this constraint one step closer to being fulfilled. Return: token_ids (Union[int, List[int], None]): - A single token ID (int) that advances the constraint, or - A list of token IDs that could advance the constraint - None if the constraint is completed or cannot be advanced """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
10,770
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
    @abstractmethod
    def does_advance(self, token_id: int):
        """
        Reads in a token and returns whether it creates progress.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """
        Reads in a token and returns booleans that indicate the progress made by it. This function will update the
        state of this object, unlike `does_advance(self, token_id: int)`.

        This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
        been generated. This becomes important if token_id != desired token (refer to the else statement in
        PhrasalConstraint).
10,770
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
        Args:
            token_id(`int`):
                The id of a newly generated token in the beam search.

        Return:
            stepped(`bool`):
                Whether this constraint has become one step closer to being fulfilled.
            completed(`bool`):
                Whether this constraint has been completely fulfilled by this token being generated.
            reset (`bool`):
                Whether this constraint has reset its progress by this token being generated.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
10,770
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
    @abstractmethod
    def reset(self):
        """
        Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment
        of a constraint is interrupted by an unwanted token.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """
        Returns the number of remaining steps of `advance()` in order to complete this constraint.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """
        Creates a new instance of this constraint.

        Args:
            stateful(`bool`):
                Whether to copy not only the constraint, but also its state, to the new instance.
10,770
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
        Return:
            constraint(`Constraint`):
                A new constraint of the same kind as the one this method was called on.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
10,770
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
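To make the `advance()`/`update()`/`reset()` contract above concrete, here is a hedged toy subclass that is not part of the library: it is fulfilled by a single fixed token id, and it mirrors `PhrasalConstraint` below by skipping `Constraint.__init__` (and therefore `test()`).

```py
# Hedged toy example (not from the library): a constraint satisfied by one fixed token id.
class SingleTokenConstraint(Constraint):
    def __init__(self, token_id: int):
        # mirror PhrasalConstraint below: skip Constraint.__init__ (and its self.test())
        super(Constraint, self).__init__()
        self.token_id = token_id
        self.seqlen = 1  # ConstraintListState reads this to compute max_seqlen
        self.completed = False

    def advance(self):
        return None if self.completed else self.token_id

    def does_advance(self, token_id: int):
        return not self.completed and token_id == self.token_id

    def update(self, token_id: int):
        if self.does_advance(token_id):
            self.completed = True
            return True, True, False  # stepped, completed, reset
        self.reset()
        return False, False, True

    def reset(self):
        self.completed = False

    def remaining(self):
        return 0 if self.completed else 1

    def copy(self, stateful=False):
        new = SingleTokenConstraint(self.token_id)
        if stateful:
            new.completed = self.completed
        return new


constraint = SingleTokenConstraint(token_id=42)
print(constraint.advance())   # 42
print(constraint.update(7))   # (False, False, True) -- wrong token, progress reset
print(constraint.update(42))  # (True, True, False)  -- fulfilled, the loop in the docstring terminates
```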
class PhrasalConstraint(Constraint):
    r"""
    [`Constraint`] enforcing that an ordered sequence of tokens is included in the output.

    Args:
        token_ids (`List[int]`):
            The ids of the tokens that must be generated by the output.
    """

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"`token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
10,771
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
def advance(self): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def does_advance(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def update(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False
10,771
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = -1  # back to the initial state, so the whole phrase has to be matched again

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
10,771
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
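A hedged usage sketch of the class above, stepping through a phrase token by token (the ids are arbitrary examples):

```py
# Hedged example: fulfilling a PhrasalConstraint step by step.
constraint = PhrasalConstraint(token_ids=[5, 9, 2])

print(constraint.advance())    # 5 -- the next token that makes progress
print(constraint.update(5))    # (True, False, False)
print(constraint.update(9))    # (True, False, False)
print(constraint.remaining())  # 1
print(constraint.update(2))    # (True, True, False) -- phrase completed
print(constraint.advance())    # None -- nothing left to generate
```

In the library, such constraint objects are usually handed to constrained beam search (for instance via the `constraints` argument of the PyTorch `generate()`) rather than driven by hand; the manual calls above are only meant to show the state transitions.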
class DisjunctiveTrie: def __init__(self, nested_token_ids: List[List[int]], no_subsets=True): r""" A helper class that builds a trie with the words represented in `nested_token_ids`. """ self.max_height = max([len(one) for one in nested_token_ids]) root = {} for token_ids in nested_token_ids: level = root for tidx, token_id in enumerate(token_ids): if token_id not in level: level[token_id] = {} level = level[token_id] if no_subsets and self.has_subsets(root, nested_token_ids): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" f" {nested_token_ids}." ) self.trie = root
10,772
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
    def next_tokens(self, current_seq):
        """
        The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
        """
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """
        Returns True when the number of leaves differs from the number of words, i.e. when some word is a prefix
        subset of another.
        """
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
10,772
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
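A hedged example of the trie built from two token-id "words" that share a first token (the ids are arbitrary):

```py
# Hedged example: a trie over two words that share a first token.
trie = DisjunctiveTrie([[1, 2, 3], [1, 4]])

print(trie.max_height)                # 3
print(trie.next_tokens([]))           # [1]
print(trie.next_tokens([1]))          # [2, 4]
print(trie.reached_leaf([1, 4]))      # True -- the second word is complete
print(trie.count_leaves(trie.trie))   # 2 == number of words, so no word is a subset of another
```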
class DisjunctiveConstraint(Constraint): r""" A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints. Args: nested_token_ids (`List[List[int]]`): A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from the list of words. """ def __init__(self, nested_token_ids: List[List[int]]): super(Constraint, self).__init__()
10,773
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0: raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.") if any(not isinstance(token_ids, list) for token_ids in nested_token_ids): raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.") if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in nested_token_ids ): raise ValueError( f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." ) self.trie = DisjunctiveTrie(nested_token_ids) self.token_ids = nested_token_ids self.seqlen = self.trie.max_height self.current_seq = [] self.completed = False def advance(self): token_list = self.trie.next_tokens(self.current_seq)
10,773
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
if len(token_list) == 0: return None else: return token_list def does_advance(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") next_tokens = self.trie.next_tokens(self.current_seq) return token_id in next_tokens def update(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False if self.does_advance(token_id): self.current_seq.append(token_id) stepped = True else: reset = True self.reset() completed = self.trie.reached_leaf(self.current_seq) self.completed = completed return stepped, completed, reset
10,773
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = list(self.current_seq)  # copy the list so the two instances don't share state
            new_constraint.completed = self.completed

        return new_constraint
10,773
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
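A hedged usage sketch of `DisjunctiveConstraint`, in the spirit of constraining generation to either of two alternative phrases (toy ids):

```py
# Hedged example: two alternative phrases that share a first token.
constraint = DisjunctiveConstraint([[10, 11], [10, 12, 13]])

print(constraint.advance())    # [10] -- both words start with token 10
print(constraint.update(10))   # (True, False, False)
print(constraint.advance())    # [11, 12] -- either word can still be completed
print(constraint.update(12))   # (True, False, False)
print(constraint.update(13))   # (True, True, False) -- "10 12 13" fulfills the constraint
print(constraint.remaining())  # 0
```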
class ConstraintListState:
    r"""
    A class for beam scorers to track their progress through a list of constraints.

    Args:
        constraints (`List[Constraint]`):
            A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
    """

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
10,774
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """The list of tokens to generate such that we can make progress.

        By "list" we don't mean the list of tokens that will fully fulfill a constraint.

        Given constraints `c_i = {t_ij | j == # of tokens}`, if we're not in the middle of progressing through a
        specific constraint `c_i`, we return:

        `[t_k1 for k in indices of unfulfilled constraints]`

        If we are in the middle of a constraint, then we return:
            `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.
10,774
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
        Though we don't care which constraint is fulfilled first, if we are in the process of fulfilling a constraint,
        that's the only one we'll return.
        """
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list
10,774
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
    def reset(self, token_ids: Optional[List[int]]):
        """
        token_ids: the tokens generated thus far, used to reset the state of the progress through constraints.
        """
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise TypeError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
10,774
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/beam_constraints.py
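A hedged sketch of how a beam scorer might drive `ConstraintListState`; the expected values assume the standard `add()` behavior whose remainder follows below, and the token ids are arbitrary:

```py
# Hedged example: tracking two phrasal constraints across generated tokens.
constraints = [PhrasalConstraint([5, 9]), PhrasalConstraint([7])]
state = ConstraintListState(constraints)

print(state.advance())   # [5, 7] -- the first token of every pending constraint
print(state.get_bank())  # 0 -- nothing fulfilled or in progress yet

state.reset([5])         # feed the tokens generated so far; reset() calls add() internally
print(state.advance())   # [9] -- locked onto the constraint that is now in progress
print(state.get_bank())  # 1 -- partial credit for the half-fulfilled phrase
```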