davanstrien (HF Staff) committed
Commit bb33774 · 1 Parent(s): 61f5a7b

Add minimal GPT OSS script for HF Jobs


- Simplified version without channel parsing
- Stores raw outputs only
- Optimized for reliability on HF Jobs
- Works on regular GPUs (L4, A10G, A100)
- Based on official HF blog recommendations

Files changed (1)
  1. gpt_oss_minimal.py +168 -0
gpt_oss_minimal.py ADDED
@@ -0,0 +1,168 @@
+# /// script
+# requires-python = ">=3.10"
+# dependencies = [
+#     "torch",
+#     "transformers>=4.55.0",
+#     "datasets",
+#     "huggingface-hub[hf_transfer]",
+#     "accelerate",
+# ]
+# ///
+"""
+Minimal GPT OSS generation script for HF Jobs.
+
+Based on official Hugging Face blog recommendations.
+Works on regular GPUs (L4, A10G, A100) without Flash Attention 3.
+
+Usage:
+    # Quick local test
+    uv run gpt_oss_minimal.py \
+        --input-dataset davanstrien/haiku_dpo \
+        --output-dataset username/haiku-raw \
+        --prompt-column question \
+        --max-samples 2
+
+    # HF Jobs execution (A10G for $1.50/hr)
+    hf jobs uv run --flavor a10g-small \
+        https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_minimal.py \
+        --input-dataset davanstrien/haiku_dpo \
+        --output-dataset username/haiku-raw \
+        --prompt-column question
+"""
+
+import argparse
+import os
+import sys
+
+import torch
+from datasets import Dataset, load_dataset
+from huggingface_hub import get_token, login
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# Enable fast downloads
+os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Minimal GPT OSS generation for HF Jobs")
+    parser.add_argument("--input-dataset", required=True, help="Input dataset on HF Hub")
+    parser.add_argument("--output-dataset", required=True, help="Output dataset on HF Hub")
+    parser.add_argument("--prompt-column", default="prompt", help="Column containing prompts")
+    parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
+    parser.add_argument("--max-samples", type=int, help="Limit number of samples")
+    parser.add_argument("--max-new-tokens", type=int, default=1024, help="Max tokens to generate")
+    args = parser.parse_args()
+
+    # Check GPU availability
+    if not torch.cuda.is_available():
+        print("ERROR: GPU required. Use HF Jobs with --flavor a10g-small or run on a GPU machine")
+        sys.exit(1)
+
+    print(f"GPU: {torch.cuda.get_device_name(0)}")
+    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")
+
+    # Authenticate
+    token = os.environ.get("HF_TOKEN") or get_token()
+    if not token:
+        print("ERROR: HF_TOKEN required. Set the HF_TOKEN env var or run: huggingface-cli login")
+        sys.exit(1)
+    login(token=token, add_to_git_credential=False)
+
+    # Load tokenizer (following the official blog)
+    print(f"Loading tokenizer: {args.model_id}")
+    tokenizer = AutoTokenizer.from_pretrained(args.model_id)
+
+    # Load model (without Flash Attention 3 for compatibility)
+    print(f"Loading model: {args.model_id}")
+    print("Note: MXFP4 will auto-dequantize to bf16 on non-Hopper GPUs")
+
+    model = AutoModelForCausalLM.from_pretrained(
+        args.model_id,
+        device_map="auto",
+        torch_dtype="auto",  # Let transformers choose the optimal dtype
+        # NOT using attn_implementation="kernels-community/vllm-flash-attn3",
+        # as it requires H100/H200 GPUs
+    )
+    print("Model loaded successfully")
+
+    # Load dataset
+    print(f"Loading dataset: {args.input_dataset}")
+    dataset = load_dataset(args.input_dataset, split="train")
+
+    if args.prompt_column not in dataset.column_names:
+        print(f"ERROR: Column '{args.prompt_column}' not found")
+        print(f"Available columns: {dataset.column_names}")
+        sys.exit(1)
+
+    # Limit samples if requested
+    if args.max_samples:
+        dataset = dataset.select(range(min(args.max_samples, len(dataset))))
+
+    print(f"Processing {len(dataset)} examples")
+
+    # Process each example
+    results = []
+    for i, example in enumerate(dataset):
+        print(f"[{i + 1}/{len(dataset)}] Processing...")
+
+        prompt_text = example[args.prompt_column]
+
+        # Create messages (user message only, as in the official examples)
+        messages = [
+            {"role": "user", "content": prompt_text}
+        ]
+
+        # Apply chat template (following the official blog)
+        inputs = tokenizer.apply_chat_template(
+            messages,
+            add_generation_prompt=True,
+            return_tensors="pt",
+            return_dict=True,
+        ).to(model.device)
+
+        # Generate
+        with torch.no_grad():
+            generated = model.generate(
+                **inputs,
+                max_new_tokens=args.max_new_tokens,
+                do_sample=True,
+                temperature=0.7,
+            )
+
+        # Decode only the generated part (excluding the input)
+        response = tokenizer.decode(
+            generated[0][inputs["input_ids"].shape[-1]:],
+            skip_special_tokens=False,  # Keep channel markers
+        )
+
+        # Store the raw output with channel markers
+        results.append({
+            "prompt": prompt_text,
+            "raw_output": response,
+            "model": args.model_id,
+        })
+
+        # Show a preview of the output structure
+        if i == 0:
+            print("Sample output preview (first 200 chars):")
+            print(response[:200])
+            print("...")
+
+    # Create and push the output dataset
+    print("\nCreating output dataset...")
+    output_dataset = Dataset.from_list(results)
+
+    print(f"Pushing to {args.output_dataset}...")
+    output_dataset.push_to_hub(args.output_dataset, token=token)
+
+    print("\n✅ Complete!")
+    print(f"Dataset: https://huggingface.co/datasets/{args.output_dataset}")
+    print("\nOutput format:")
+    print("- prompt: Original prompt")
+    print("- raw_output: Full model response with channel markers")
+    print("- model: Model ID used")
+    print("\nTo extract the final response, look for text after '<|channel|>final<|message|>'")
+
+
+if __name__ == "__main__":
+    main()
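
Since the script deliberately stores raw outputs without channel parsing, downstream code has to split out the final channel itself. Below is a minimal post-processing sketch, assuming the Harmony-style markers ('<|channel|>final<|message|>', terminated by '<|return|>' or '<|end|>') appear verbatim in raw_output; the dataset name and the extract_final helper are hypothetical:

from datasets import load_dataset

FINAL_MARKER = "<|channel|>final<|message|>"

def extract_final(raw_output: str) -> str:
    """Hypothetical helper: isolate the final-channel text from a raw GPT OSS response."""
    # Take everything after the last final-channel marker...
    _, sep, tail = raw_output.rpartition(FINAL_MARKER)
    if not sep:
        return raw_output.strip()  # No marker found; fall back to the raw text
    # ...and cut at the first trailing special token, if present
    for stop in ("<|return|>", "<|end|>"):
        cut = tail.find(stop)
        if cut != -1:
            tail = tail[:cut]
    return tail.strip()

# Example: load the pushed dataset and print the first final answer
ds = load_dataset("username/haiku-raw", split="train")
print(extract_final(ds[0]["raw_output"]))

Keeping skip_special_tokens=False in the main script is what makes this possible: the analysis and final channels survive in raw_output, so parsing can be redone later without re-running generation.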