Update README.md
README.md

````diff
@@ -446,7 +446,7 @@ libri_data = load_dataset("distil-whisper/librispeech_long", "clean", split="val
 audio_array = libri_data[0]["audio"]["array"]
 inputs = processor(text=chat_prompt, audios=audio_array)
 
-outputs = model.generate(**inputs, max_new_tokens=256
+outputs = model.generate(**inputs, max_new_tokens=256)
 generated_ids = outputs[:, inputs['input_ids'].size(1):]
 response = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
 ```
@@ -490,7 +490,7 @@ libri_data = load_dataset("distil-whisper/librispeech_long", "clean", split="val
 audio_array = [libri_data[0]["audio"]["array"]]*2
 inputs = processor(text=chat_prompt, audios=audio_array)
 
-outputs = model.generate(**inputs, max_new_tokens=256
+outputs = model.generate(**inputs, max_new_tokens=256)
 generated_ids = outputs[:, inputs['input_ids'].size(1):]
 response = processor.batch_decode(generated_ids, skip_special_tokens=True)
 ```
````
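For reference, here is a minimal sketch of what the corrected single-audio snippet looks like in context. Only the lines that appear in the diff above come from the README itself; the checkpoint id, model/processor classes, and the `chat_prompt` value below are placeholders assumed for illustration, since the README sections that define them are not shown in this commit.

```python
# Hedged sketch of the corrected single-audio example.
# Assumptions (not from the diff): the checkpoint id, the AutoProcessor /
# AutoModelForCausalLM classes, and the literal chat_prompt string.
from datasets import load_dataset
from transformers import AutoProcessor, AutoModelForCausalLM

model_id = "org/audio-chat-model"  # placeholder checkpoint id
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)  # assumed model class

# Placeholder prompt; the README presumably builds chat_prompt elsewhere,
# e.g. via the processor's chat template.
chat_prompt = "Transcribe the following audio."

# The lines below mirror the diffed snippet (README lines 446-452).
# The truncated hunk header suggests the "validation" split of the dataset.
libri_data = load_dataset("distil-whisper/librispeech_long", "clean", split="validation")
audio_array = libri_data[0]["audio"]["array"]
inputs = processor(text=chat_prompt, audios=audio_array)
# Depending on the processor, return_tensors="pt" may be needed above.

outputs = model.generate(**inputs, max_new_tokens=256)  # the fix: closing ")" restored
generated_ids = outputs[:, inputs['input_ids'].size(1):]  # drop the prompt tokens
response = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

The batched variant at README line 490 follows the same pattern, passing `[libri_data[0]["audio"]["array"]]*2` as `audios` and decoding without the trailing `[0]`.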