davanstrien (HF Staff) committed
Commit f5e30d5 · Parent: db35e01

Add automatic dataset card generation


- Creates minimal dataset card with generation details
- Documents dataset structure and usage
- Includes source dataset, model, and generation parameters
- Auto-pushed with dataset to HuggingFace Hub
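
In code, the new flow amounts to rendering a markdown card with YAML front matter and pushing it to the dataset repo via `huggingface_hub.DatasetCard`. A minimal standalone sketch of that pattern (the repo id and card text below are placeholders, not the committed implementation):

# Minimal sketch of the card-push pattern this commit adds.
# "username/my-dataset" and the card body are placeholders.
from huggingface_hub import DatasetCard

card_text = """---
tags:
- synthetic
---

# My generated dataset

Generation details go here.
"""

card = DatasetCard(card_text)
card.push_to_hub("username/my-dataset")  # requires a write token / prior login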

Files changed (2)
  1. gpt_oss_minimal.py +66 -4
  2. gpt_oss_transformers.py +73 -163
gpt_oss_minimal.py CHANGED
@@ -37,13 +37,57 @@ import sys
 
 import torch
 from datasets import Dataset, load_dataset
-from huggingface_hub import get_token, login
+from huggingface_hub import DatasetCard, get_token, login
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from datetime import datetime
 
 # Enable fast downloads
 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 
 
+def create_dataset_card(
+    input_dataset: str,
+    model_id: str,
+    num_examples: int,
+    reasoning_effort: str,
+    generation_time: str,
+) -> str:
+    """Create a minimal dataset card."""
+    return f"""---
+tags:
+- synthetic
+- gpt-oss
+- reasoning
+---
+
+# GPT OSS Generated Responses
+
+This dataset was generated using OpenAI's GPT OSS model with reasoning channels.
+
+## Generation Details
+
+- **Source Dataset**: [{input_dataset}](https://huggingface.co/datasets/{input_dataset})
+- **Model**: [{model_id}](https://huggingface.co/{model_id})
+- **Number of Examples**: {num_examples:,}
+- **Reasoning Effort**: {reasoning_effort}
+- **Generation Date**: {generation_time}
+
+## Dataset Structure
+
+Each example contains:
+- `prompt`: Original prompt from source dataset
+- `raw_output`: Full model response with channel markers
+- `model`: Model identifier
+- `reasoning_effort`: Reasoning level used
+
+## Usage
+
+To extract the final response, look for text after `<|channel|>final<|message|>` in the raw_output.
+
+Generated using [davanstrien/openai-oss](https://huggingface.co/datasets/davanstrien/openai-oss).
+"""
+
+
 def main():
     parser = argparse.ArgumentParser(
         description="Minimal GPT OSS generation for HF Jobs"
@@ -60,7 +104,9 @@ def main():
     parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
     parser.add_argument("--max-samples", type=int, help="Limit number of samples")
     parser.add_argument(
-        "--max-new-tokens", type=int, help="Max tokens to generate (auto-scales with reasoning effort if not set)"
+        "--max-new-tokens",
+        type=int,
+        help="Max tokens to generate (auto-scales with reasoning effort if not set)",
     )
     parser.add_argument(
         "--reasoning-effort",
@@ -81,7 +127,7 @@ def main():
         help="Top-p sampling (default: 1.0)",
     )
     args = parser.parse_args()
-
+
     # Auto-scale max_new_tokens based on reasoning effort if not explicitly set
     if args.max_new_tokens is None:
         if args.reasoning_effort == "high":
@@ -90,7 +136,9 @@ def main():
            args.max_new_tokens = 1024
         else:  # low
             args.max_new_tokens = 512
-        print(f"Auto-set max_new_tokens={args.max_new_tokens} based on reasoning_effort={args.reasoning_effort}")
+        print(
+            f"Auto-set max_new_tokens={args.max_new_tokens} based on reasoning_effort={args.reasoning_effort}"
+        )
 
     # Check GPU availability
     if not torch.cuda.is_available():
@@ -148,6 +196,8 @@ def main():
 
     # Process each example
     results = []
+    generation_start_time = datetime.now().isoformat()
+
     for i, example in enumerate(dataset):
         print(f"[{i + 1}/{len(dataset)}] Processing...")
 
@@ -203,6 +253,18 @@ def main():
 
     print(f"Pushing to {args.output_dataset}...")
     output_dataset.push_to_hub(args.output_dataset, token=token)
+
+    # Create and push dataset card
+    print("Creating dataset card...")
+    card_content = create_dataset_card(
+        input_dataset=args.input_dataset,
+        model_id=args.model_id,
+        num_examples=len(results),
+        reasoning_effort=args.reasoning_effort,
+        generation_time=generation_start_time,
+    )
+    card = DatasetCard(card_content)
+    card.push_to_hub(args.output_dataset, token=token)
 
     print(f"\n✅ Complete!")
     print(f"Dataset: https://huggingface.co/datasets/{args.output_dataset}")
gpt_oss_transformers.py CHANGED
@@ -3,42 +3,39 @@
 # dependencies = [
 #     "datasets",
 #     "huggingface-hub[hf_transfer]",
-#     "hf-xet >= 1.1.7",
 #     "torch",
-#     "transformers>=4.55.0",
+#     "transformers>=4.45.0",
 #     "tqdm",
 #     "accelerate",
 # ]
 # ///
 """
-Generate responses with transparent reasoning using OpenAI's GPT OSS models.
+Generate responses with transparent reasoning using OpenAI's open source GPT OSS models.
 
-This implementation works on regular GPUs (L4, A100, A10G, T4) without requiring H100s.
-The models automatically dequantize MXFP4 to bf16 when needed, making them accessible
-on standard datacenter hardware.
-
-Key features:
-- Works on regular GPUs without special hardware
-- Extracts reasoning from analysis/commentary channels
-- Handles the simplified channel output format
-- No Flash Attention 3 or special kernels needed
+This implementation uses standard Transformers library for maximum compatibility.
+The models output structured reasoning in separate channels, allowing you to
+capture both the thinking process and final response.
 
 Example usage:
-    # Quick test with a single prompt
-    uv run gpt_oss_transformers.py --prompt "Write a haiku about mountains"
-
     # Generate haiku with reasoning
     uv run gpt_oss_transformers.py \\
         --input-dataset davanstrien/haiku_dpo \\
         --output-dataset username/haiku-reasoning \\
         --prompt-column question
 
-    # HF Jobs execution (A10G for $1.50/hr)
+    # Any prompt dataset with custom settings
+    uv run gpt_oss_transformers.py \\
+        --input-dataset username/prompts \\
+        --output-dataset username/responses-with-reasoning \\
+        --prompt-column prompt \\
+        --reasoning-level high \\
+        --max-samples 100
+
+    # HF Jobs execution
     hf jobs uv run --flavor a10g-small \\
         https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_transformers.py \\
-        --input-dataset davanstrien/haiku_dpo \\
-        --output-dataset username/haiku-reasoning \\
-        --prompt-column question
+        --input-dataset username/prompts \\
+        --output-dataset username/responses-with-reasoning
 """
 
 import argparse
@@ -91,79 +88,34 @@ def parse_channels(raw_output: str) -> Dict[str, str]:
     """
     Extract think/content from GPT OSS channel-based output.
 
-    The actual output format is simpler than expected:
-    analysisREASONING_TEXTassistantfinalRESPONSE_TEXT
-
-    Sometimes includes commentary channel:
-    commentaryMETA_TEXTanalysisREASONING_TEXTassistantfinalRESPONSE_TEXT
+    Expected format:
+    <|start|>assistant<|channel|>analysis<|message|>CHAIN_OF_THOUGHT<|end|>
+    <|start|>assistant<|channel|>final<|message|>ACTUAL_MESSAGE
     """
-    result = {"think": "", "content": "", "raw_output": raw_output}
-
-    # Clean up the text - remove system prompt if present
-    if "user" in raw_output:
-        # Take everything after the last user prompt
-        parts = raw_output.split("user")
-        if len(parts) > 1:
-            text = parts[-1]
-            # Find where the assistant response starts
-            for marker in ["analysis", "commentary", "assistant"]:
-                if marker in text:
-                    idx = text.find(marker)
-                    if idx > 0:
-                        text = text[idx:]
-                        raw_output = text
-                        break
-    else:
-        text = raw_output
-
-    # Extract reasoning (analysis and/or commentary)
-    reasoning_parts = []
+    think = ""
+    content = ""
 
-    # Try to extract analysis
-    if "analysis" in text:
-        match = re.search(
-            r"analysis(.*?)(?:commentary|assistantfinal|final|$)", text, re.DOTALL
-        )
-        if match:
-            reasoning_parts.append(("Analysis", match.group(1).strip()))
-
-    # Try to extract commentary
-    if "commentary" in text:
-        match = re.search(
-            r"commentary(.*?)(?:analysis|assistantfinal|final|$)", text, re.DOTALL
-        )
-        if match:
-            reasoning_parts.append(("Commentary", match.group(1).strip()))
-
-    # Combine reasoning
-    if reasoning_parts:
-        result["think"] = "\n\n".join(
-            f"[{label}] {content}" for label, content in reasoning_parts
-        )
+    # Extract analysis channel (chain of thought)
+    analysis_pattern = (
+        r"<\|start\|>assistant<\|channel\|>analysis<\|message\|>(.*?)<\|end\|>"
+    )
+    analysis_match = re.search(analysis_pattern, raw_output, re.DOTALL)
+    if analysis_match:
+        think = analysis_match.group(1).strip()
 
-    # Extract final response
-    if "assistantfinal" in text:
-        parts = text.split("assistantfinal")
-        if len(parts) > 1:
-            result["content"] = parts[-1].strip()
-    elif "final" in text:
-        # Fallback - look for "final" keyword
-        parts = text.split("final")
-        if len(parts) > 1:
-            result["content"] = parts[-1].strip()
-
-    # Clean up any remaining tokens
-    for key in ["think", "content"]:
-        result[key] = result[key].replace("<|end|>", "").replace("<|return|>", "")
-        result[key] = (
-            result[key].replace("<|message|>", "").replace("assistant", "").strip()
-        )
+    # Extract final channel (user-facing content)
+    final_pattern = (
+        r"<\|start\|>assistant<\|channel\|>final<\|message\|>(.*?)(?:<\|end\|>|$)"
+    )
+    final_match = re.search(final_pattern, raw_output, re.DOTALL)
+    if final_match:
+        content = final_match[1].strip()
 
     # If no channels found, treat entire output as content
-    if not result["think"] and not result["content"]:
-        result["content"] = raw_output.strip()
+    if not think and not content:
+        content = raw_output.strip()
 
-    return result
+    return {"think": think, "content": content, "raw_output": raw_output}
 
 
 def create_dataset_card(
@@ -274,75 +226,40 @@ def main(
     logger.info("HuggingFace token found, authenticating...")
     login(token=HF_TOKEN)
 
-    # Load tokenizer (always use padding_side="left" for generation)
+    # Load tokenizer
     logger.info(f"Loading tokenizer: {model_id}")
     tokenizer = AutoTokenizer.from_pretrained(
-        model_id,
-        padding_side="left",  # Always use left padding for generation
+        model_id, padding_side="left" if "120b" in model_id else "right"
     )
 
     # Add padding token if needed
     if tokenizer.pad_token is None:
         tokenizer.pad_token = tokenizer.eos_token
 
-    # Model loading configuration based on OpenAI cookbook
-    # For 20B model, standard auto device map works
-    # For 120B model, use tensor parallel planning
-    if "120b" in model_id:
-        model_kwargs = {
-            "tp_plan": "auto",
-            "enable_expert_parallel": True,
-        }
-    else:
-        model_kwargs = {
-            "device_map": "auto",
-        }
+    # Model loading configuration
+    device_map = {"tp_plan": "auto"} if "120b" in model_id else "auto"
 
     # Load model
     logger.info(f"Loading model: {model_id}")
-    logger.info("Using standard configuration (no Flash Attention 3 needed)")
-
-    # Note about MXFP4
-    logger.info("Note: MXFP4 will auto-dequantize to bf16 on non-Hopper GPUs")
-
-    # Check available GPU memory
-    if num_gpus > 0:
-        gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
-        if gpu_memory < 40 and "20b" in model_id.lower():
-            logger.info(
-                f"GPU has {gpu_memory:.1f}GB. 20B model needs ~40GB when dequantized"
-            )
-            logger.info("Model will still load but may use CPU offloading if needed")
+    logger.info("This may take a few minutes for large models...")
 
     try:
-        # Load with standard configuration (no Flash Attention 3)
-        # This works on L4, A100, A10G, T4 GPUs
         model = AutoModelForCausalLM.from_pretrained(
             model_id,
-            torch_dtype=torch.bfloat16,  # Can also use "auto"
-            # DO NOT USE: attn_implementation="kernels-community/vllm-flash-attn3"
-            **model_kwargs,
+            torch_dtype=torch.bfloat16,
+            **device_map,
        )
         model.eval()
-        logger.info("Successfully loaded model")
-
-        # Report memory usage
-        if torch.cuda.is_available():
-            memory_gb = torch.cuda.memory_allocated() / 1024**3
-            logger.info(f"GPU memory used: {memory_gb:.1f}GB")
-
-    except torch.cuda.OutOfMemoryError as e:
-        logger.error(f"Out of memory error: {e}")
-        logger.error("\nMemory requirements:")
-        logger.error("- 20B model: ~40GB VRAM (use A100-40GB or 2xL4)")
-        logger.error("- 120B model: ~240GB VRAM (use 4xA100-80GB)")
-        logger.error("\nFor HF Jobs, try:")
-        logger.error("- 20B: --flavor a10g-large or a100-large")
-        logger.error("- 120B: --flavor 4xa100")
-        sys.exit(1)
     except Exception as e:
-        logger.error(f"Error loading model: {e}")
-        sys.exit(1)
+        logger.error(f"Failed to load model: {e}")
+        logger.error("Trying with default configuration...")
+        # Fallback to simpler loading
+        model = AutoModelForCausalLM.from_pretrained(
+            model_id,
+            torch_dtype="auto",
+            device_map="auto",
+        )
+        model.eval()
 
     # Generation configuration
     generation_config = GenerationConfig(
@@ -375,36 +292,29 @@ def main(
     prompts = []
     original_prompts = []
 
-    # Get current date for system prompt
-    from datetime import datetime
-
-    current_date = datetime.now().strftime("%Y-%m-%d")
-
     for example in tqdm(dataset, desc="Preparing prompts"):
         prompt_text = example[prompt_column]
         original_prompts.append(prompt_text)
 
-        # Create messages with reasoning level in system prompt
-        messages = [
-            {
-                "role": "system",
-                "content": f"""You are ChatGPT, a large language model trained by OpenAI.
-Knowledge cutoff: 2024-06
-Current date: {current_date}
-
-Reasoning: {reasoning_level}
-
-# Valid channels: analysis, commentary, final. Channel must be included for every message.""",
-            },
-            {"role": "user", "content": prompt_text},
-        ]
-
-        # Apply chat template
-        prompt = tokenizer.apply_chat_template(
-            messages,
-            add_generation_prompt=True,
-            tokenize=False,
-        )
+        # Create message format (using user role only as per documentation)
+        messages = [{"role": "user", "content": prompt_text}]
+
+        # Apply chat template with reasoning effort
+        try:
+            prompt = tokenizer.apply_chat_template(
+                messages,
+                reasoning_effort=reasoning_level,
+                add_generation_prompt=True,
+                tokenize=False,
+            )
+        except TypeError:
+            # Fallback if reasoning_effort parameter not supported
+            logger.warning(
+                "reasoning_effort parameter not supported, using standard template"
+            )
+            prompt = tokenizer.apply_chat_template(
+                messages, add_generation_prompt=True, tokenize=False
+            )
         prompts.append(prompt)
 
     # Generate responses in batches
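
For reference, the rewritten `parse_channels` now relies on two anchored regexes over the special-token markers rather than splitting on bare keywords. A self-contained illustration of that parsing on a made-up completion (not part of the commit):

import re

# Made-up completion text; real outputs come from model.generate + tokenizer.decode.
sample = (
    "<|start|>assistant<|channel|>analysis<|message|>Count syllables first.<|end|>"
    "<|start|>assistant<|channel|>final<|message|>Mountains stand in mist..."
)

analysis = re.search(
    r"<\|start\|>assistant<\|channel\|>analysis<\|message\|>(.*?)<\|end\|>",
    sample,
    re.DOTALL,
)
final = re.search(
    r"<\|start\|>assistant<\|channel\|>final<\|message\|>(.*?)(?:<\|end\|>|$)",
    sample,
    re.DOTALL,
)
print(analysis.group(1) if analysis else "")  # chain of thought ("think")
print(final.group(1) if final else "")        # user-facing answer ("content")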