ksych committed · verified
Commit 6fb65ce · 1 Parent(s): 3bd5427

Fix padding

Files changed (1)
  1. inference.py +1 -1
inference.py CHANGED
@@ -26,7 +26,7 @@ def decode_tts(tokens, quantizer, n_codebooks, n_original_tokens, start_audio_to
     if reminder:
         # pad if last frame is incomplete
         pad_tokens = torch.zeros(n_codebooks - reminder, device="cuda")
-        audio_tokens = torch.cat([audio_tokens, pad_tokens[reminder:n_codebooks]], dim=0)
+        audio_tokens = torch.cat([audio_tokens, pad_tokens], dim=0)

     transposed = audio_tokens.view(-1, n_codebooks).t()
     codes = transposed.view(n_codebooks, 1, -1).to(device)
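For context, a minimal sketch (not part of the commit) of why the full pad tensor should be concatenated: `pad_tokens` already holds exactly `n_codebooks - reminder` zeros, so slicing it with `[reminder:n_codebooks]` drops some or all of that padding, leaving a token count that is not a multiple of `n_codebooks` and breaking the later `view(-1, n_codebooks)`. The concrete values below (`n_codebooks = 4`, six tokens, CPU tensors instead of `device="cuda"`) are illustrative assumptions, not values from the repository.

```python
import torch

# Illustrative assumptions: 4 codebooks, 6 generated audio tokens,
# tensors kept on CPU so the sketch runs anywhere.
n_codebooks = 4
audio_tokens = torch.arange(6, dtype=torch.float)   # last frame has only 2 of 4 tokens
reminder = audio_tokens.shape[0] % n_codebooks      # 2 leftover tokens

# Same padding tensor as in inference.py: enough zeros to complete the last frame.
pad_tokens = torch.zeros(n_codebooks - reminder)

# Old line: slicing the 2-element pad tensor from index `reminder` (= 2) yields an
# empty tensor, so nothing is appended and view(-1, n_codebooks) would fail.
old = torch.cat([audio_tokens, pad_tokens[reminder:n_codebooks]], dim=0)
print(old.shape)                           # torch.Size([6]) -> not a multiple of 4

# Fixed line: append the whole pad tensor so the length becomes a multiple of 4.
new = torch.cat([audio_tokens, pad_tokens], dim=0)
print(new.view(-1, n_codebooks).shape)     # torch.Size([2, 4]) -> reshape into frames works
```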