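# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "accelerate",
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "torch",
#     "transformers>=4.55.0",
# ]
# ///
# NOTE: inline metadata (PEP 723) inferred from this script's imports and the
# `uv run` usage below; the version pin is an assumption, adjust as needed.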
""" |
|
Minimal GPT OSS generation script for HF Jobs. |
|
|
|
Based on official HuggingFace blog recommendations. |
|
Works on regular GPUs (L4, A10G, A100) without Flash Attention 3. |
|
|
|
Usage: |
|
# Quick local test |
|
uv run gpt_oss_minimal.py \ |
|
--input-dataset davanstrien/haiku_dpo \ |
|
--output-dataset username/haiku-raw \ |
|
--prompt-column question \ |
|
--max-samples 2 |
|
|
|
# HF Jobs execution (L4x4 for proper memory) |
|
hf jobs uv run --flavor l4x4 --secrets HF_TOKEN=hf_*** \ |
|
https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_minimal.py \ |
|
--input-dataset davanstrien/haiku_dpo \ |
|
--output-dataset username/haiku-raw \ |
|
--prompt-column question |
|
""" |

import argparse
import os
import sys
from datetime import datetime

import torch
from datasets import Dataset, load_dataset
from huggingface_hub import DatasetCard, get_token, login
from transformers import AutoModelForCausalLM, AutoTokenizer
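
# Use the Rust-based hf_transfer backend for faster Hub downloads and uploads.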
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"


def create_dataset_card(
    input_dataset: str,
    model_id: str,
    num_examples: int,
    reasoning_effort: str,
    generation_time: str,
) -> str:
    """Create a minimal dataset card."""
    return f"""---
tags:
- synthetic
- gpt-oss
- reasoning
---

# GPT OSS Generated Responses

This dataset was generated using OpenAI's GPT OSS model with reasoning channels.

## Generation Details

- **Source Dataset**: [{input_dataset}](https://huggingface.co/datasets/{input_dataset})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Model Collection**: [OpenAI GPT OSS Models](https://huggingface.co/collections/openai/gpt-oss-68911959590a1634ba11c7a4)
- **Number of Examples**: {num_examples:,}
- **Reasoning Effort**: {reasoning_effort}
- **Generation Date**: {generation_time}

## Dataset Structure

Each example contains:
- `prompt`: Original prompt from the source dataset
- `raw_output`: Full model response with channel markers
- `model`: Model identifier
- `reasoning_effort`: Reasoning level used

## Usage

To extract the final response, look for the text after `<|channel|>final<|message|>` in `raw_output`.
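
A minimal extraction sketch (assuming the response terminates with the `<|return|>` marker):

```python
# `example` is one row of this dataset
raw = example["raw_output"]
# Everything after the final-channel marker, trimmed at the end-of-response token
final = raw.split("<|channel|>final<|message|>")[-1].split("<|return|>")[0].strip()
```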

Generated using [uv-scripts/openai-oss](https://huggingface.co/datasets/uv-scripts/openai-oss).
"""


def main():
    parser = argparse.ArgumentParser(
        description="Minimal GPT OSS generation for HF Jobs"
    )
    parser.add_argument(
        "--input-dataset", required=True, help="Input dataset on HF Hub"
    )
    parser.add_argument(
        "--output-dataset", required=True, help="Output dataset on HF Hub"
    )
    parser.add_argument(
        "--prompt-column", default="prompt", help="Column containing prompts"
    )
    parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
    parser.add_argument("--max-samples", type=int, help="Limit number of samples")
    parser.add_argument(
        "--max-new-tokens",
        type=int,
        help="Max tokens to generate (auto-scales with reasoning effort if not set)",
    )
    parser.add_argument(
        "--reasoning-effort",
        choices=["low", "medium", "high"],
        default="medium",
        help="Reasoning effort level (default: medium)",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=1.0,
        help="Sampling temperature (default: 1.0)",
    )
    parser.add_argument(
        "--top-p",
        type=float,
        default=1.0,
        help="Top-p sampling (default: 1.0)",
    )
    args = parser.parse_args()
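
    # Higher reasoning effort emits a longer analysis channel before the final
    # answer, so scale the default generation budget with it.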
    if args.max_new_tokens is None:
        if args.reasoning_effort == "high":
            args.max_new_tokens = 2048
        elif args.reasoning_effort == "medium":
            args.max_new_tokens = 1024
        else:
            args.max_new_tokens = 512
        print(
            f"Auto-set max_new_tokens={args.max_new_tokens} based on reasoning_effort={args.reasoning_effort}"
        )

    if not torch.cuda.is_available():
        print(
            "ERROR: GPU required. Use HF Jobs with --flavor a10g-small or run on a GPU machine"
        )
        sys.exit(1)

    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")
    token = os.environ.get("HF_TOKEN") or get_token()
    if not token:
        print(
            "ERROR: HF_TOKEN required. Set HF_TOKEN env var or run: huggingface-cli login"
        )
        sys.exit(1)
    login(token=token, add_to_git_credential=False)

    print(f"Loading tokenizer: {args.model_id}")
    tokenizer = AutoTokenizer.from_pretrained(args.model_id)

    print(f"Loading model: {args.model_id}")
    print("Note: MXFP4 weights auto-dequantize to bf16 on non-Hopper GPUs")
    model = AutoModelForCausalLM.from_pretrained(
        args.model_id,
        device_map="auto",
        torch_dtype="auto",
    )
    print("Model loaded successfully")

    print(f"Loading dataset: {args.input_dataset}")
    dataset = load_dataset(args.input_dataset, split="train")

    if args.prompt_column not in dataset.column_names:
        print(f"ERROR: Column '{args.prompt_column}' not found")
        print(f"Available columns: {dataset.column_names}")
        sys.exit(1)

    if args.max_samples:
        dataset = dataset.select(range(min(args.max_samples, len(dataset))))

    print(f"Processing {len(dataset)} examples")

    results = []
    generation_start_time = datetime.now().isoformat()

    for i, example in enumerate(dataset):
        print(f"[{i + 1}/{len(dataset)}] Processing...")

        prompt_text = example[args.prompt_column]

        messages = [{"role": "user", "content": prompt_text}]
        inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=True,
            reasoning_effort=args.reasoning_effort,
        ).to(model.device)

        with torch.no_grad():
            generated = model.generate(
                **inputs,
                max_new_tokens=args.max_new_tokens,
                do_sample=True,
                temperature=args.temperature,
                top_p=args.top_p,
            )
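
        # Decode only the newly generated tokens; keep special tokens so the
        # harmony channel markers survive in raw_output.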
        response = tokenizer.decode(
            generated[0][inputs["input_ids"].shape[-1] :],
            skip_special_tokens=False,
        )

        results.append(
            {
                "prompt": prompt_text,
                "raw_output": response,
                "model": args.model_id,
                "reasoning_effort": args.reasoning_effort,
            }
        )

        if i == 0:
            print("Sample output preview (first 200 chars):")
            print(response[:200])
            print("...")

    print("\nCreating output dataset...")
    output_dataset = Dataset.from_list(results)

    print(f"Pushing to {args.output_dataset}...")
    output_dataset.push_to_hub(args.output_dataset, token=token)

    print("Creating dataset card...")
    card_content = create_dataset_card(
        input_dataset=args.input_dataset,
        model_id=args.model_id,
        num_examples=len(results),
        reasoning_effort=args.reasoning_effort,
        generation_time=generation_start_time,
    )
    card = DatasetCard(card_content)
    card.push_to_hub(args.output_dataset, token=token)

    print("\n✅ Complete!")
    print(f"Dataset: https://huggingface.co/datasets/{args.output_dataset}")
    print("\nOutput format:")
    print("- prompt: Original prompt")
    print("- raw_output: Full model response with channel markers")
    print("- model: Model ID used")
    print(f"- reasoning_effort: {args.reasoning_effort}")
    print(
        "\nTo extract final response, look for text after '<|channel|>final<|message|>'"
    )


if __name__ == "__main__":
    main()