Update README.md
README.md (CHANGED)
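This commit completes two tokenizer.decode(...) calls in the README's example code that had been cut off mid-line: the single-sequence decode in summarize (line 38) and the list-comprehension batch decode in summarize_batch (line 69).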
@@ -35,7 +35,7 @@ def summarize(text, tokenizer, model, num_beams=4, temperature=1, max_new_tokens
     in_data = inputs.input_ids.to('cuda')
     attention_mask = inputs.attention_mask.to('cuda')
     output_ids = model.generate(input_ids=in_data, attention_mask=attention_mask, num_beams=num_beams, max_new_tokens=max_new_tokens, do_sample=True, early_stopping=True, use_cache=True, temperature=temperature, pad_token_id=tokenizer.eos_token_id, eos_token_id=tokenizer.eos_token_id)
-    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=
+    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
     return generated_text
 
@@ -66,7 +66,7 @@ def summarize_batch(texts, tokenizer, model, num_beams=4, temperature=1, max_new
         eos_token_id=tokenizer.eos_token_id
     )
 
-    generated_texts = [tokenizer.decode(output, skip_special_tokens=
+    generated_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in output_ids]
 
     return generated_texts
 
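For context, here is a minimal sketch of how the repaired lines run end to end. This is not part of the commit: the model choice ('gpt2'), the prompt, and the concrete generation settings are illustrative assumptions; only the two decode lines mirror the README.

# Usage sketch (assumed, not part of the commit): exercises the two fixed
# decode calls with a Hugging Face causal LM. Requires a CUDA device, to
# match the README's .to('cuda') calls; 'gpt2' and the prompt are placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = AutoModelForCausalLM.from_pretrained('gpt2').to('cuda')

inputs = tokenizer("Text to summarize goes here.", return_tensors='pt')
in_data = inputs.input_ids.to('cuda')
attention_mask = inputs.attention_mask.to('cuda')

output_ids = model.generate(
    input_ids=in_data,
    attention_mask=attention_mask,
    num_beams=4,
    temperature=1.0,
    max_new_tokens=64,
    do_sample=True,
    early_stopping=True,
    use_cache=True,
    pad_token_id=tokenizer.eos_token_id,
    eos_token_id=tokenizer.eos_token_id,
)

# Line 38 after the fix: decode the single best sequence.
generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Line 69 after the fix: decode every returned sequence.
generated_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in output_ids]

print(generated_text)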