Aananda-giri committed
Commit 42609a3 · verified · 1 Parent(s): 19fa3b4

Update README.md

Files changed (1):
  1. README.md (+14, -10)
README.md CHANGED
@@ -93,7 +93,7 @@ print(f'device: {device}')
 
 # Load checkpoint
 latest_model_checkpoint = "parameters_300m/model_pg_398000_steps.pth"
-checkpoint = torch.load(latest_model_checkpoint, weights_only=False)
+checkpoint = torch.load(latest_model_checkpoint, map_location=device, weights_only=False)
 model.load_state_dict(checkpoint["model_state_dict"])
 ```
 
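For the first hunk: `map_location` tells `torch.load` which device to place the checkpoint's tensors on, so a checkpoint saved on one device (e.g. a GPU machine) can still be restored on a CPU-only machine, while `weights_only=False` allows the full checkpoint dictionary to be unpickled. A minimal sketch of the loading pattern, assuming `model` and `device` are set up as earlier in this README:

```python
import torch

# Choose whichever device is available; the checkpoint is remapped onto it.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# map_location moves every tensor in the checkpoint to `device`, so a
# GPU-trained checkpoint also loads on a CPU-only machine.
# weights_only=False is needed because the file stores a full Python dict
# (model weights plus training state), not just tensors.
checkpoint = torch.load(
    "parameters_300m/model_pg_398000_steps.pth",
    map_location=device,
    weights_only=False,
)

model.load_state_dict(checkpoint["model_state_dict"])  # `model` is built earlier in the README
model.to(device)
model.eval()  # switch to inference mode before generating text
```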
@@ -112,24 +112,28 @@ generate_and_print_sample(
 
 #### Advanced Text Generation
 ```python
-from previous_chapters import generate_and_print_chat
+from previous_chapters import generate_chat_optimized
+import time
 
-generated_text = generate_and_print_chat(
+start_time = time.time()
+output_text = generate_chat_optimized(
     prompt="रामले भात",
     tokenizer=tokenizer,
     chat_tokenizer=chat_tokenizer,
     model=model,
+    max_new_tokens=20,
+    context_size=512,
     device=device,
-    max_new_tokens=150,
-    context_length=None,
-    temperature=0.1,
-    top_k=50,
-    top_p=0.9,
+    temperature=0.3,
+    top_k=5,
+    top_p=None,
+    eos_id=None,
     repetition_penalty=1.2,
-    clean_the_text=True
+    penalize_len_below=10,
+    batch_size=1  # Added parameter
 )
 
-print("Generated text:\n", generated_text)
+print(f"time:{time.time() - start_time}\n output_text: {output_text}")
 ```
 
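The second hunk swaps `generate_and_print_chat` for `generate_chat_optimized`, makes the decoding settings explicit (low temperature, small top-k, a repetition penalty, and what appears to be a minimum-length penalty), and times the call. As a rough guide to what the `temperature`, `top_k`, and `repetition_penalty` knobs conventionally do per generated token, here is a generic sampling sketch; it only illustrates the usual meaning of these parameters and is not the actual `generate_chat_optimized` implementation from `previous_chapters`:

```python
import torch

def sample_next_token(logits, generated_ids, temperature=0.3, top_k=5,
                      repetition_penalty=1.2):
    """Pick the next token id from a 1-D logits vector (one row of model output).

    Generic sketch of temperature / top-k / repetition-penalty decoding;
    not the repository's generate_chat_optimized internals.
    """
    logits = logits.clone()

    # Repetition penalty: tokens that already appear in the output become
    # less likely to be chosen again (values > 1.0 penalize repeats).
    for token_id in set(generated_ids):
        if logits[token_id] > 0:
            logits[token_id] /= repetition_penalty
        else:
            logits[token_id] *= repetition_penalty

    # Top-k: keep only the k highest-scoring tokens, mask out the rest.
    if top_k is not None:
        kth_best = torch.topk(logits, top_k).values[-1]
        logits[logits < kth_best] = float("-inf")

    # Temperature: values < 1.0 sharpen the distribution (more deterministic),
    # values > 1.0 flatten it (more random).
    probs = torch.softmax(logits / temperature, dim=-1)
    return torch.multinomial(probs, num_samples=1).item()

# Toy usage with a fake 10-token vocabulary.
next_id = sample_next_token(torch.randn(10), generated_ids=[3, 7])
print(next_id)
```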