maayanorner committed
Commit 4f3fc9b (verified)
Parent: 9e51b9d

Update README.md

Files changed (1): README.md (+3 −3)
README.md CHANGED
@@ -11,7 +11,7 @@ Known Issues:
 https://github.com/IAHLT/summarization_he
 
 
-```# !pip install bitsandbytes>=0.41.3 to quantize
+```# install bitsandbytes>=0.41.3 to quantize
 import torch
 from transformers import (
     AutoModelForCausalLM,
@@ -20,7 +20,7 @@ from transformers import (
 )
 
 
-def predict_text(text, tokenizer, model, num_beams=4, temperature=1, max_new_tokens=512):
+def summarize(text, tokenizer, model, num_beams=4, temperature=1, max_new_tokens=512):
     # This text template is important.
     inputs = tokenizer(f'{text}\n### סיכום:', return_tensors="pt")
     in_data = inputs.input_ids.to('cuda')
@@ -58,7 +58,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
 text = 'טקסט לסיכום'
 
-predict_text(text, max_new_tokens=512, tokenizer=tokenizer, model=model)
+summarize(text, max_new_tokens=512, tokenizer=tokenizer, model=model)
 ```
 
 # Examples:
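
For readers skimming this commit, here is a minimal end-to-end sketch of how the renamed `summarize` helper is meant to be used, stitched together from the fragments visible in the hunks above. The `model_path` value, the 4-bit `BitsAndBytesConfig` (suggested by the "install bitsandbytes>=0.41.3 to quantize" comment), and the final decode step are assumptions, not part of the diff; only the prompt template, the function signature, and the generation parameters come from the README.

```python
# Minimal usage sketch, not verbatim README code; assumptions are flagged inline.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_path = "..."  # placeholder: the actual model id is not shown in this diff

# Assumption: the bitsandbytes comment refers to 4-bit loading; drop
# quantization_config to load the model in full precision instead.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16
    ),
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


def summarize(text, tokenizer, model, num_beams=4, temperature=1.0, max_new_tokens=512):
    # Prompt template from the README; "### סיכום:" is Hebrew for "### Summary:".
    inputs = tokenizer(f"{text}\n### סיכום:", return_tensors="pt")
    in_data = inputs.input_ids.to(model.device)
    # Assumption: beam-search generation, then decode only the newly generated tokens.
    out = model.generate(
        in_data,
        num_beams=num_beams,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
    )
    return tokenizer.decode(out[0, in_data.shape[1]:], skip_special_tokens=True)


text = "טקסט לסיכום"  # Hebrew: "text to summarize"
print(summarize(text, max_new_tokens=512, tokenizer=tokenizer, model=model))
```

The README flags the prompt template as important, so keep the `\n### סיכום:` suffix exactly as written when calling `summarize`.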