matthieumeeus97 committed on
Commit
1b97d7f
·
verified ·
1 Parent(s): fd5198f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -7
README.md CHANGED
@@ -30,7 +30,7 @@ It achieves the following results on the evaluation set:
30
  - Logits/rejected: -0.8732
31
  - Logits/chosen: -0.9594
32
 
33
- # use the model
34
 
35
  ```
36
  from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -43,18 +43,18 @@ messages = [
43
  {"role": "user", "content": "Jacques brel, Willem Elsschot en Jan Jambon zitten op café. Waar zouden ze over babbelen?"},
44
  ]
45
 
46
- input_ids = new_tokenizer.apply_chat_template(
47
  messages,
48
  add_generation_prompt=True,
49
  return_tensors="pt"
50
- ).to(new_model.device)
51
 
52
  new_terminators = [
53
- new_tokenizer.eos_token_id,
54
- new_tokenizer.convert_tokens_to_ids("<|eot_id|>")
55
  ]
56
 
57
- outputs = new_model.generate(
58
  input_ids,
59
  max_new_tokens=512,
60
  eos_token_id=new_terminators,
@@ -63,7 +63,7 @@ outputs = new_model.generate(
63
  top_p=0.95,
64
  )
65
  response = outputs[0][input_ids.shape[-1]:]
66
- print(new_tokenizer.decode(response, skip_special_tokens=True))
67
 
68
  ```
69
 
 
30
  - Logits/rejected: -0.8732
31
  - Logits/chosen: -0.9594
32
 
33
+ # Use the model
34
 
35
  ```
36
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
43
  {"role": "user", "content": "Jacques brel, Willem Elsschot en Jan Jambon zitten op café. Waar zouden ze over babbelen?"},
44
  ]
45
 
46
+ input_ids = tokenizer.apply_chat_template(
47
  messages,
48
  add_generation_prompt=True,
49
  return_tensors="pt"
50
+ ).to(model.device)
51
 
52
  new_terminators = [
53
+ tokenizer.eos_token_id,
54
+ tokenizer.convert_tokens_to_ids("<|eot_id|>")
55
  ]
56
 
57
+ outputs = model.generate(
58
  input_ids,
59
  max_new_tokens=512,
60
  eos_token_id=new_terminators,
 
63
  top_p=0.95,
64
  )
65
  response = outputs[0][input_ids.shape[-1]:]
66
+ print(tokenizer.decode(response, skip_special_tokens=True))
67
 
68
  ```
69