hanbin committed
Commit 57d8e3b · verified · 1 Parent(s): b4e0029

Update Usage

Files changed (1): README.md (+12 / -14)
README.md CHANGED
@@ -60,10 +60,10 @@ We apply tailored prompts for coding and math task:
 
 
 ```python
+
 import os
 from tqdm import tqdm
 import torch
-from transformers import AutoTokenizer
 from vllm import LLM, SamplingParams
 os.environ["NCCL_IGNORE_DISABLED_P2P"] = "1"
 os.environ["TOKENIZERS_PARALLELISM"] = "true"
@@ -82,27 +82,25 @@ def generate(question_list,model_path):
     completions = [[output.text for output in output_item.outputs] for output_item in outputs]
     return completions
 
-def make_conv_hf(question, tokenizer):
-    # for math problem
-    content = question + "\n\nPresent the answer in LaTex format: \\boxed{Your answer}"
-    # for code problem
-    # content = question + "\n\nWrite Python code to solve the problem. Present the code in \n```python\nYour code\n```\nat the end."
-    msg = [
-        {"role": "user", "content": content}
-    ]
-    chat = tokenizer.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
-    return chat
-
+
+def make_conv_zero(question):
+    question = question + "\n\nPresent the answer in LaTex format: \\boxed{Your answer}"
+    content = f"A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>. User: {question}. Assistant:"
+    print(content)
+    return content
+
 def run():
     model_path = "PRIME-RL/Eurus-2-7B-PRIME-Zero"
     all_problems = [
         "which number is larger? 9.11 or 9.9?"
     ]
-    tokenizer = AutoTokenizer.from_pretrained(model_path)
-    completions = generate([make_conv_hf(problem_data, tokenizer) for problem_data in all_problems], model_path)
+    completions = generate([make_conv_zero(problem_data) for problem_data in all_problems], model_path)
     print(completions)
+    # " Let's compare the numbers 9.11 and 9.9.\n\nTo determine which number is larger, we can compare them digit by digit from left to right.\n\nFirst, let's look at the digits in the ones place:\n- Both numbers have a 9 in the ones place.\n\nNext, let's look at the digits in the tenths place:\n- The number 9.11 has a 1 in the tenths place.\n- The number 9.9 has a 9 in the tenths place.\n\nSince 9 is greater than 1, we can conclude that 9.9 is greater than 9.11.\n\nSo, the larger number is \\(\\boxed{9.9}\\)."
 if __name__ == "__main__":
     run()
+
+
 ```
 
 
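After this change, the helper no longer routes through `tokenizer.apply_chat_template`; it assembles one flat string. As a worked reference, the snippet below reconstructs the exact prompt that `make_conv_zero` builds (and `print(content)` echoes) for the sample question, using only the template visible in the diff:

```python
# Prompt produced by make_conv_zero for the sample question, per the diff:
# the fixed <think>/<answer> preamble, then "User: {question}. Assistant:".
question = (
    "which number is larger? 9.11 or 9.9?"
    "\n\nPresent the answer in LaTex format: \\boxed{Your answer}"
)
prompt = (
    "A conversation between User and Assistant. The user asks a question, "
    "and the Assistant solves it. The assistant first thinks about the "
    "reasoning process in the mind and then provides the user with the "
    "answer. The reasoning process and answer are enclosed within "
    "<think> </think> and <answer> </answer> tags, respectively, i.e., "
    "<think> reasoning process here </think> <answer> answer here </answer>. "
    f"User: {question}. Assistant:"
)
print(prompt)
```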
 
 
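The second hunk header names `generate(question_list, model_path)`, but only its last two lines appear in the diff. Below is a minimal sketch of how it plausibly wraps vLLM, so the snippet can be tried end to end; the `tensor_parallel_size` and sampling settings here are assumptions, not taken from the README:

```python
import torch
from vllm import LLM, SamplingParams

def generate(question_list, model_path):
    # Assumption: shard the model across all visible GPUs, which would also
    # explain the `import torch` and the NCCL env var at the top of the script.
    llm = LLM(model=model_path, tensor_parallel_size=torch.cuda.device_count())
    # Placeholder sampling settings; the README's actual values are not shown.
    sampling_params = SamplingParams(temperature=0.0, max_tokens=1024)
    outputs = llm.generate(question_list, sampling_params)
    # Only these last two lines are visible in the diff context.
    completions = [[output.text for output in output_item.outputs] for output_item in outputs]
    return completions
```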