# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "torch",
#     "transformers>=4.55.0",
#     "datasets",
#     "hf-xet >= 1.1.7",
#     "huggingface-hub[hf_transfer]",
#     "accelerate",
# ]
# ///
"""
Minimal GPT OSS generation script for HF Jobs.
Based on official HuggingFace blog recommendations.
Works on regular GPUs (L4, A10G, A100) without Flash Attention 3.

Usage:
    # Quick local test
    uv run gpt_oss_minimal.py \
        --input-dataset davanstrien/haiku_dpo \
        --output-dataset username/haiku-raw \
        --prompt-column question \
        --max-samples 2

    # HF Jobs execution (A10G for $1.50/hr)
    hf jobs uv run --flavor a10g-small \
        https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_minimal.py \
        --input-dataset davanstrien/haiku_dpo \
        --output-dataset username/haiku-raw \
        --prompt-column question
"""

import argparse
import os
import sys

import torch
from datasets import Dataset, load_dataset
from huggingface_hub import get_token, login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Enable fast downloads
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"


def main():
    parser = argparse.ArgumentParser(
        description="Minimal GPT OSS generation for HF Jobs"
    )
    parser.add_argument(
        "--input-dataset", required=True, help="Input dataset on HF Hub"
    )
    parser.add_argument(
        "--output-dataset", required=True, help="Output dataset on HF Hub"
    )
    parser.add_argument(
        "--prompt-column", default="prompt", help="Column containing prompts"
    )
    parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
    parser.add_argument("--max-samples", type=int, help="Limit number of samples")
    parser.add_argument(
        "--max-new-tokens", type=int, default=1024, help="Max tokens to generate"
    )
    parser.add_argument(
        "--reasoning-effort",
        choices=["low", "medium", "high"],
        default="medium",
        help="Reasoning effort level (default: medium)",
    )

    args = parser.parse_args()

    # Check GPU availability
    if not torch.cuda.is_available():
        print(
            "ERROR: GPU required. Use HF Jobs with --flavor a10g-small or run on a GPU machine"
        )
        sys.exit(1)

    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")

    # Authenticate
    token = os.environ.get("HF_TOKEN") or get_token()
    if not token:
        print(
            "ERROR: HF_TOKEN required. Set HF_TOKEN env var or run: huggingface-cli login"
        )
        sys.exit(1)
    login(token=token, add_to_git_credential=False)

    # Load tokenizer (following official blog)
    print(f"Loading tokenizer: {args.model_id}")
    tokenizer = AutoTokenizer.from_pretrained(args.model_id)

    # Load model (without Flash Attention 3 for compatibility)
    print(f"Loading model: {args.model_id}")
    print("Note: MXFP4 will auto-dequantize to bf16 on non-Hopper GPUs")
    model = AutoModelForCausalLM.from_pretrained(
        args.model_id,
        device_map="auto",
        torch_dtype="auto",  # Let transformers choose the optimal dtype
        # NOT using: attn_implementation="kernels-community/vllm-flash-attn3"
        # as it requires H100/H200 GPUs
    )
    print("Model loaded successfully")

    # Load dataset
    print(f"Loading dataset: {args.input_dataset}")
    dataset = load_dataset(args.input_dataset, split="train")

    if args.prompt_column not in dataset.column_names:
        print(f"ERROR: Column '{args.prompt_column}' not found")
        print(f"Available columns: {dataset.column_names}")
        sys.exit(1)

    # if args.random_sample:
    #     dataset = dataset.shuffle()
    #     print(f"Random sampling enabled. Using {args.max_samples} samples.")

    # Limit samples if requested
    if args.max_samples:
        dataset = dataset.select(range(min(args.max_samples, len(dataset))))

    print(f"Processing {len(dataset)} examples")

    # Process each example
    results = []
    for i, example in enumerate(dataset):
        print(f"[{i + 1}/{len(dataset)}] Processing...")

        prompt_text = example[args.prompt_column]

        # Create messages (user message only, as per official examples)
        messages = [{"role": "user", "content": prompt_text}]

        # Apply chat template with reasoning_effort parameter
        inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=True,
            reasoning_effort=args.reasoning_effort,  # "low", "medium", or "high"
        ).to(model.device)

        # Generate
        with torch.no_grad():
            generated = model.generate(
                **inputs,
                max_new_tokens=args.max_new_tokens,
                do_sample=True,
                temperature=0.7,
            )

        # Decode only the generated part (excluding input)
        response = tokenizer.decode(
            generated[0][inputs["input_ids"].shape[-1] :],
            skip_special_tokens=False,  # Keep channel markers
        )

        # Store raw output with channel markers
        results.append(
            {
                "prompt": prompt_text,
                "raw_output": response,
                "model": args.model_id,
                "reasoning_effort": args.reasoning_effort,
            }
        )

        # Show preview of output structure
        if i == 0:
            print("Sample output preview (first 200 chars):")
            print(response[:200])
            print("...")

    # Create and push dataset
    print("\nCreating output dataset...")
    output_dataset = Dataset.from_list(results)

    print(f"Pushing to {args.output_dataset}...")
    output_dataset.push_to_hub(args.output_dataset, token=token)

    print("\n✅ Complete!")
    print(f"Dataset: https://huggingface.co/datasets/{args.output_dataset}")
    print("\nOutput format:")
    print("- prompt: Original prompt")
    print("- raw_output: Full model response with channel markers")
    print("- model: Model ID used")
    print(f"- reasoning_effort: {args.reasoning_effort}")
    print(
        "\nTo extract final response, look for text after '<|channel|>final<|message|>'"
    )


if __name__ == "__main__":
    main()
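
# Example (a sketch, not wired into the CLI above): pulling the final-channel text
# out of a stored `raw_output` string downstream. It splits on the
# '<|channel|>final<|message|>' marker mentioned in the output notes; the
# '<|return|>' terminator and the "username/haiku-raw" dataset name are
# assumptions and may need adjusting for your outputs.
#
#     from datasets import load_dataset
#
#     rows = load_dataset("username/haiku-raw", split="train")
#     raw = rows[0]["raw_output"]
#     marker = "<|channel|>final<|message|>"
#     final = raw.split(marker, 1)[1] if marker in raw else raw
#     final = final.split("<|return|>", 1)[0].strip()
#     print(final)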