hugohrban committed on
Commit
6966604
·
verified ·
1 Parent(s): 5f0626f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +9 -5
README.md CHANGED
@@ -12,17 +12,21 @@ Example usage:
12
 
13
  ```python
14
  from transformers import AutoModelForCausalLM
15
- from transformers import AutoTokenizer
 
 
 
16
  import torch
17
  import torch.nn.functional as F
18
 
19
  # load model and tokenizer
20
  model = AutoModelForCausalLM.from_pretrained("hugohrban/progen2-small-mix7", trust_remote_code=True)
21
- tokenizer = AutoTokenizer.from_pretrained("hugohrban/progen2-small-mix7", trust_remote_code=True)
 
22
 
23
  # prepare input
24
  prompt = "<|pf03668|>1MEVVIVTGMSGAGK"
25
- input_ids = torch.tensor(tokenizer.encode(prompt)).to(model.device)
26
 
27
  # forward pass
28
  logits = model(input_ids).logits
@@ -30,6 +34,6 @@ logits = model(input_ids).logits
30
  # print output probabilities
31
  next_token_logits = logits[-1, :]
32
  next_token_probs = F.softmax(next_token_logits, dim=-1)
33
- for i, prob in enumerate(next_token_probs):
34
-     print(f"{tokenizer.decode(i)}: {100 * prob:.2f}%")
35
  ```
 
12
 
13
  ```python
14
  from transformers import AutoModelForCausalLM
15
+ from tokenizers import Tokenizer
16
+ # optionally use local imports
17
+ # from models.progen.modeling_progen import ProGenForCausalLM
18
+ # from models.progen.configuration_progen import ProGenConfig
19
  import torch
20
  import torch.nn.functional as F
21
 
22
  # load model and tokenizer
23
  model = AutoModelForCausalLM.from_pretrained("hugohrban/progen2-small-mix7", trust_remote_code=True)
24
+ tokenizer = Tokenizer.from_pretrained("hugohrban/progen2-small-mix7")
25
+ tokenizer.no_padding()
26
 
27
  # prepare input
28
  prompt = "<|pf03668|>1MEVVIVTGMSGAGK"
29
+ input_ids = torch.tensor(tokenizer.encode(prompt).ids).to(model.device)
30
 
31
  # forward pass
32
  logits = model(input_ids).logits
 
34
  # print output probabilities
35
  next_token_logits = logits[-1, :]
36
  next_token_probs = F.softmax(next_token_logits, dim=-1)
37
+ for i in range(tokenizer.get_vocab_size(with_added_tokens=False)):
38
+     print(f"{tokenizer.id_to_token(i)}: {round(100 * next_token_probs[i].item(), 2):.2f} %")
39
  ```