Commit db35e01
Parent(s): 78e2e1c
Add temperature, top_p, and smart max_tokens defaults

- Set temperature=1.0 and top_p=1.0 as defaults (OpenAI recommended)
- Add --temperature and --top-p flags for customization
- Auto-scale max_new_tokens based on reasoning_effort:
  - low: 512 tokens
  - medium: 1024 tokens
  - high: 2048 tokens (prevents truncation)
- Document OpenAI's sampling recommendations in README
- Users can still override with explicit --max-new-tokens

Files changed:
- README.md (+14 -1)
- gpt_oss_minimal.py (+33 -10)
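
The override behavior described in the commit message fits in a few lines. The sketch below is a hypothetical, standalone restatement of the commit's rule; the helper function name is invented, and the script itself inlines this logic after `parse_args()` (see the diff below):

```python
# Minimal sketch of the auto-scaling rule from this commit.
# resolve_max_new_tokens is a hypothetical helper for illustration only;
# gpt_oss_minimal.py inlines the same logic after parsing arguments.
EFFORT_TO_TOKENS = {"low": 512, "medium": 1024, "high": 2048}

def resolve_max_new_tokens(max_new_tokens, reasoning_effort):
    """Return the explicit --max-new-tokens value if given, else auto-scale by effort."""
    if max_new_tokens is not None:
        return max_new_tokens  # an explicit flag always wins
    return EFFORT_TO_TOKENS[reasoning_effort]

assert resolve_max_new_tokens(None, "high") == 2048  # auto-scaled
assert resolve_max_new_tokens(4096, "low") == 4096   # user override
```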
README.md CHANGED

@@ -28,8 +28,15 @@ hf jobs uv run --flavor l4x4 --secrets HF_TOKEN=hf_*** \
 | `--prompt-column` | Column containing prompts | `prompt` |
 | `--model-id` | Model to use | `openai/gpt-oss-20b` |
 | `--max-samples` | Limit samples to process | None (all) |
-| `--max-new-tokens` | Max tokens to generate |
+| `--max-new-tokens` | Max tokens to generate | Auto-scales: 512/1024/2048 |
 | `--reasoning-effort` | Reasoning depth: low/medium/high | `medium` |
+| `--temperature` | Sampling temperature | `1.0` |
+| `--top-p` | Top-p sampling | `1.0` |
+
+**Note**: `max-new-tokens` auto-scales based on `reasoning-effort` if not set:
+- `low`: 512 tokens
+- `medium`: 1024 tokens
+- `high`: 2048 tokens (prevents truncation of detailed reasoning)
 
 ## 💡 What You Get
 
@@ -102,6 +109,12 @@ The `reasoning_effort` parameter controls how much chain-of-thought reasoning th
 - `medium`: Balanced reasoning (default)
 - `high`: Detailed step-by-step reasoning
 
+### Sampling Parameters
+OpenAI recommends `temperature=1.0` and `top_p=1.0` as defaults for GPT OSS models:
+- These settings provide good diversity without compromising quality
+- The model was trained to work well with these parameters
+- Adjust only if you need specific behavior (e.g., lower temperature for more deterministic output)
+
 ## 📚 Resources
 
 - [Model: openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b)
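
To make the new README guidance concrete, here is a hypothetical pair of generation configurations; the keyword names match `transformers`' `model.generate`, but the non-default values are illustrative assumptions, not recommendations from this commit:

```python
# Recommended defaults for gpt-oss models (per the README section above).
RECOMMENDED = dict(do_sample=True, temperature=1.0, top_p=1.0)

# Hypothetical "more deterministic" variant: a lower temperature narrows sampling.
MORE_DETERMINISTIC = dict(do_sample=True, temperature=0.2, top_p=1.0)

# Usage sketch: model.generate(**inputs, max_new_tokens=1024, **RECOMMENDED)
```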
gpt_oss_minimal.py CHANGED

@@ -60,15 +60,37 @@ def main():
     parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
     parser.add_argument("--max-samples", type=int, help="Limit number of samples")
     parser.add_argument(
-        "--max-new-tokens", type=int,
+        "--max-new-tokens", type=int, help="Max tokens to generate (auto-scales with reasoning effort if not set)"
     )
     parser.add_argument(
-        "--reasoning-effort",
-        choices=["low", "medium", "high"],
+        "--reasoning-effort",
+        choices=["low", "medium", "high"],
         default="medium",
-        help="Reasoning effort level (default: medium)"
+        help="Reasoning effort level (default: medium)",
+    )
+    parser.add_argument(
+        "--temperature",
+        type=float,
+        default=1.0,
+        help="Sampling temperature (default: 1.0)",
+    )
+    parser.add_argument(
+        "--top-p",
+        type=float,
+        default=1.0,
+        help="Top-p sampling (default: 1.0)",
     )
     args = parser.parse_args()
+
+    # Auto-scale max_new_tokens based on reasoning effort if not explicitly set
+    if args.max_new_tokens is None:
+        if args.reasoning_effort == "high":
+            args.max_new_tokens = 2048  # More tokens for detailed reasoning
+        elif args.reasoning_effort == "medium":
+            args.max_new_tokens = 1024
+        else:  # low
+            args.max_new_tokens = 512
+        print(f"Auto-set max_new_tokens={args.max_new_tokens} based on reasoning_effort={args.reasoning_effort}")
 
     # Check GPU availability
     if not torch.cuda.is_available():
@@ -136,20 +158,21 @@
 
     # Apply chat template with reasoning_effort parameter
     inputs = tokenizer.apply_chat_template(
-        messages,
-        add_generation_prompt=True,
-        return_tensors="pt",
+        messages,
+        add_generation_prompt=True,
+        return_tensors="pt",
         return_dict=True,
-        reasoning_effort=args.reasoning_effort  # "low", "medium", or "high"
+        reasoning_effort=args.reasoning_effort,  # "low", "medium", or "high"
     ).to(model.device)
 
-    # Generate
+    # Generate with user-specified or default parameters
     with torch.no_grad():
         generated = model.generate(
             **inputs,
             max_new_tokens=args.max_new_tokens,
             do_sample=True,
-            temperature=
+            temperature=args.temperature,
+            top_p=args.top_p,
         )
 
     # Decode only the generated part (excluding input)
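
For reference, here is a self-contained sketch of the generation path as it stands after this commit. The model-loading keyword arguments (`torch_dtype`, `device_map`) and the example prompt are assumptions for illustration; only the chat-template and generate arguments mirror the diff above:

```python
# Hedged end-to-end sketch of the post-commit generation path.
# Loading kwargs and the prompt are assumptions; the apply_chat_template /
# generate arguments mirror gpt_oss_minimal.py after this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "openai/gpt-oss-20b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

messages = [{"role": "user", "content": "Explain beam search in one paragraph."}]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
    return_dict=True,
    reasoning_effort="medium",  # forwarded to the gpt-oss chat template
).to(model.device)

with torch.no_grad():
    generated = model.generate(
        **inputs,
        max_new_tokens=1024,  # the auto-scaled default for medium effort
        do_sample=True,
        temperature=1.0,      # OpenAI-recommended default
        top_p=1.0,
    )

# Decode only the newly generated tokens, as the script does.
new_tokens = generated[0][inputs["input_ids"].shape[1]:]
print(tokenizer.decode(new_tokens, skip_special_tokens=True))
```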