# /// script
# requires-python = ">=3.10"
# dependencies = [
# "torch",
# "transformers>=4.55.0",
# "datasets",
# "hf-xet >= 1.1.7",
# "huggingface-hub[hf_transfer]",
# "accelerate",
# ]
# ///
"""
Minimal GPT OSS generation script for HF Jobs.
Based on official HuggingFace blog recommendations.
Works on regular GPUs (L4, A10G, A100) without Flash Attention 3.
Usage:
# Quick local test
uv run gpt_oss_minimal.py \
--input-dataset davanstrien/haiku_dpo \
--output-dataset username/haiku-raw \
--prompt-column question \
--max-samples 2
# HF Jobs execution (L4x4 for proper memory)
hf jobs uv run --flavor l4x4 --secrets HF_TOKEN=hf_*** \
https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_minimal.py \
--input-dataset davanstrien/haiku_dpo \
--output-dataset username/haiku-raw \
--prompt-column question
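# Inspect a generated row afterwards (sketch; substitute your own output dataset)
python -c "from datasets import load_dataset; print(load_dataset('username/haiku-raw', split='train')[0])"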
"""

import argparse
import os
import sys
from datetime import datetime

# Enable fast hf_transfer downloads. This must be set before huggingface_hub is
# imported, because the flag is read once at import time.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

import torch
from datasets import Dataset, load_dataset
from huggingface_hub import DatasetCard, get_token, login
from transformers import AutoModelForCausalLM, AutoTokenizer


def create_dataset_card(
input_dataset: str,
model_id: str,
num_examples: int,
reasoning_effort: str,
generation_time: str,
) -> str:
"""Create a minimal dataset card."""
return f"""---
tags:
- synthetic
- gpt-oss
- reasoning
---
# GPT OSS Generated Responses
This dataset was generated using OpenAI's GPT OSS model with reasoning channels.
## Generation Details
- **Source Dataset**: [{input_dataset}](https://huggingface.co/datasets/{input_dataset})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Model Collection**: [OpenAI GPT OSS Models](https://huggingface.co/collections/openai/gpt-oss-68911959590a1634ba11c7a4)
- **Number of Examples**: {num_examples:,}
- **Reasoning Effort**: {reasoning_effort}
- **Generation Date**: {generation_time}
## Dataset Structure
Each example contains:
- `prompt`: Original prompt from source dataset
- `raw_output`: Full model response with channel markers
- `model`: Model identifier
- `reasoning_effort`: Reasoning level used
## Usage
To extract the final response, look for text after `<|channel|>final<|message|>` in the raw_output.
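
For example, a minimal extraction sketch (assumes the final-channel marker is present and that the message ends with a `<|return|>` or `<|end|>` token):

```python
FINAL_MARKER = "<|channel|>final<|message|>"

def extract_final(raw_output: str) -> str:
    # Keep everything after the last final-channel marker, then drop a trailing end token.
    text = raw_output.split(FINAL_MARKER)[-1] if FINAL_MARKER in raw_output else raw_output
    for end_token in ("<|return|>", "<|end|>"):
        text = text.split(end_token)[0]
    return text.strip()
```
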
Generated using [uv-scripts/openai-oss](https://huggingface.co/datasets/uv-scripts/openai-oss).
"""


def main():
parser = argparse.ArgumentParser(
description="Minimal GPT OSS generation for HF Jobs"
)
parser.add_argument(
"--input-dataset", required=True, help="Input dataset on HF Hub"
)
parser.add_argument(
"--output-dataset", required=True, help="Output dataset on HF Hub"
)
parser.add_argument(
"--prompt-column", default="prompt", help="Column containing prompts"
)
parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
parser.add_argument("--max-samples", type=int, help="Limit number of samples")
parser.add_argument(
"--max-new-tokens",
type=int,
help="Max tokens to generate (auto-scales with reasoning effort if not set)",
)
parser.add_argument(
"--reasoning-effort",
choices=["low", "medium", "high"],
default="medium",
help="Reasoning effort level (default: medium)",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="Sampling temperature (default: 1.0)",
)
parser.add_argument(
"--top-p",
type=float,
default=1.0,
help="Top-p sampling (default: 1.0)",
)
args = parser.parse_args()
# Auto-scale max_new_tokens based on reasoning effort if not explicitly set
if args.max_new_tokens is None:
if args.reasoning_effort == "high":
args.max_new_tokens = 2048 # More tokens for detailed reasoning
elif args.reasoning_effort == "medium":
args.max_new_tokens = 1024
else: # low
args.max_new_tokens = 512
print(
f"Auto-set max_new_tokens={args.max_new_tokens} based on reasoning_effort={args.reasoning_effort}"
)
# Check GPU availability
if not torch.cuda.is_available():
        print(
            "ERROR: GPU required. Use HF Jobs with --flavor a10g-small or run on a GPU machine"
        )
sys.exit(1)
print(f"GPU: {torch.cuda.get_device_name(0)}")
print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")
# Authenticate
token = os.environ.get("HF_TOKEN") or get_token()
if not token:
print(
"ERROR: HF_TOKEN required. Set HF_TOKEN env var or run: huggingface-cli login"
)
sys.exit(1)
login(token=token, add_to_git_credential=False)
# Load tokenizer (following official blog)
print(f"Loading tokenizer: {args.model_id}")
tokenizer = AutoTokenizer.from_pretrained(args.model_id)
# Load model (without Flash Attention 3 for compatibility)
print(f"Loading model: {args.model_id}")
print("Note: MXFP4 will auto-dequantize to bf16 on non-Hopper GPUs")
model = AutoModelForCausalLM.from_pretrained(
args.model_id,
device_map="auto",
torch_dtype="auto", # Let transformers choose optimal dtype
# NOT using: attn_implementation="kernels-community/vllm-flash-attn3"
# as it requires H100/H200 GPUs
)
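    # Sketch (assumption, untested here): on Hopper GPUs (H100/H200) the FA3 kernel noted
    # above can be requested instead by passing the attn_implementation shown below.
    # model = AutoModelForCausalLM.from_pretrained(
    #     args.model_id,
    #     device_map="auto",
    #     torch_dtype="auto",
    #     attn_implementation="kernels-community/vllm-flash-attn3",
    # )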
print("Model loaded successfully")
# Load dataset
print(f"Loading dataset: {args.input_dataset}")
dataset = load_dataset(args.input_dataset, split="train")
if args.prompt_column not in dataset.column_names:
print(f"ERROR: Column '{args.prompt_column}' not found")
print(f"Available columns: {dataset.column_names}")
sys.exit(1)
    # Optionally shuffle before selecting (would need a --random-sample flag, not defined here).
    # Note: Dataset.shuffle() returns a new dataset, so the result must be reassigned:
    # if args.random_sample:
    #     dataset = dataset.shuffle(seed=42)
    #     print(f"Random sampling enabled. Using {args.max_samples} samples.")
# Limit samples if requested
if args.max_samples:
dataset = dataset.select(range(min(args.max_samples, len(dataset))))
print(f"Processing {len(dataset)} examples")
# Process each example
results = []
generation_start_time = datetime.now().isoformat()
for i, example in enumerate(dataset):
print(f"[{i + 1}/{len(dataset)}] Processing...")
prompt_text = example[args.prompt_column]
# Create messages (user message only, as per official examples)
messages = [{"role": "user", "content": prompt_text}]
# Apply chat template with reasoning_effort parameter
inputs = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
return_tensors="pt",
return_dict=True,
reasoning_effort=args.reasoning_effort, # "low", "medium", or "high"
).to(model.device)
# Generate with user-specified or default parameters
with torch.no_grad():
generated = model.generate(
**inputs,
max_new_tokens=args.max_new_tokens,
do_sample=True,
temperature=args.temperature,
top_p=args.top_p,
)
# Decode only the generated part (excluding input)
response = tokenizer.decode(
generated[0][inputs["input_ids"].shape[-1] :],
skip_special_tokens=False, # Keep channel markers
)
# Store raw output with channel markers
results.append(
{
"prompt": prompt_text,
"raw_output": response,
"model": args.model_id,
"reasoning_effort": args.reasoning_effort,
}
)
# Show preview of output structure
if i == 0:
print("Sample output preview (first 200 chars):")
print(response[:200])
print("...")
# Create and push dataset
print("\nCreating output dataset...")
output_dataset = Dataset.from_list(results)
print(f"Pushing to {args.output_dataset}...")
output_dataset.push_to_hub(args.output_dataset, token=token)
# Create and push dataset card
print("Creating dataset card...")
card_content = create_dataset_card(
input_dataset=args.input_dataset,
model_id=args.model_id,
num_examples=len(results),
reasoning_effort=args.reasoning_effort,
generation_time=generation_start_time,
)
card = DatasetCard(card_content)
card.push_to_hub(args.output_dataset, token=token)
print(f"\n✅ Complete!")
print(f"Dataset: https://huggingface.co/datasets/{args.output_dataset}")
print(f"\nOutput format:")
print("- prompt: Original prompt")
print("- raw_output: Full model response with channel markers")
print("- model: Model ID used")
print(f"- reasoning_effort: {args.reasoning_effort}")
print(
"\nTo extract final response, look for text after '<|channel|>final<|message|>'"
)


if __name__ == "__main__":
main()