YingxuHe committed on
Commit
efafd23
·
1 Parent(s): da4de03

update default generation config

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -454,7 +454,7 @@ audio_array, sample_rate = librosa.load("/path/to/your/audio/file", sr=16000)
454
  audio_array = [audio_array]*2
455
  inputs = processor(text=chat_prompt, audios=audio_array)
456
 
457
- outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.1, repetition_penalty=1.1, top_p=0.9, no_repeat_ngram_size=6)
458
  generated_ids = outputs[:, inputs['input_ids'].size(1):]
459
  response = processor.batch_decode(generated_ids, skip_special_tokens=True)
460
  ```
@@ -508,7 +508,7 @@ for key, value in inputs.items():
508
  if value.dtype == torch.float32:
509
  inputs[key] = inputs[key].to(torch.bfloat16)
510
 
511
- outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.1, repetition_penalty=1.1, top_p=0.9, no_repeat_ngram_size=6)
512
  generated_ids = outputs[:, inputs['input_ids'].size(1):]
513
  response = processor.batch_decode(generated_ids, skip_special_tokens=True)
514
  ```
 
454
  audio_array = [audio_array]*2
455
  inputs = processor(text=chat_prompt, audios=audio_array)
456
 
457
+ outputs = model.generate(**inputs, max_new_tokens=256)
458
  generated_ids = outputs[:, inputs['input_ids'].size(1):]
459
  response = processor.batch_decode(generated_ids, skip_special_tokens=True)
460
  ```
 
508
  if value.dtype == torch.float32:
509
  inputs[key] = inputs[key].to(torch.bfloat16)
510
 
511
+ outputs = model.generate(**inputs, max_new_tokens=256)
512
  generated_ids = outputs[:, inputs['input_ids'].size(1):]
513
  response = processor.batch_decode(generated_ids, skip_special_tokens=True)
514
  ```