Update README.md
Browse files
README.md
CHANGED
@@ -42,7 +42,7 @@ In the table, all models are fine-tuned based on the Qwen2.5-Math-7B base model.
42 | ```python
43 | from transformers import AutoModelForCausalLM, AutoTokenizer
44 |
45 | - model_name = "
46 | device = "cuda" # the device to load the model onto
47 |
48 | model = AutoModelForCausalLM.from_pretrained(
@@ -68,7 +68,7 @@ model_inputs = tokenizer([text], return_tensors="pt").to(device)
68 |
69 | generated_ids = model.generate(
70 |     **model_inputs,
71 | -   max_new_tokens=
72 | )
73 | generated_ids = [
74 |     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
42 | ```python
43 | from transformers import AutoModelForCausalLM, AutoTokenizer
44 |
45 | + model_name = "SunnyLin/Qwen2.5-7B-DPO-VP"
46 | device = "cuda" # the device to load the model onto
47 |
48 | model = AutoModelForCausalLM.from_pretrained(
68 |
69 | generated_ids = model.generate(
70 |     **model_inputs,
71 | +   max_new_tokens=2048
72 | )
73 | generated_ids = [
74 |     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)