Commit 10ec4e7 (parent: 89351df)

Update dependencies

gpt_oss_minimal.py (+55 -41)
@@ -2,8 +2,9 @@
 # requires-python = ">=3.10"
 # dependencies = [
 #     "torch",
-#     "transformers>=4.
+#     "transformers>=4.55.0",
 #     "datasets",
+#     "hf-xet >= 1.1.7",
 #     "huggingface-hub[hf_transfer]",
 #     "accelerate",
 # ]
@@ -44,38 +45,52 @@ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 
 
 def main():
-    parser = argparse.ArgumentParser(description="Minimal GPT OSS generation for HF Jobs")
-    parser.add_argument("--input-dataset", required=True, help="Input dataset on HF Hub")
-    parser.add_argument("--output-dataset", required=True, help="Output dataset on HF Hub")
-    parser.add_argument("--prompt-column", default="prompt", help="Column containing prompts")
+    parser = argparse.ArgumentParser(
+        description="Minimal GPT OSS generation for HF Jobs"
+    )
+    parser.add_argument(
+        "--input-dataset", required=True, help="Input dataset on HF Hub"
+    )
+    parser.add_argument(
+        "--output-dataset", required=True, help="Output dataset on HF Hub"
+    )
+    parser.add_argument(
+        "--prompt-column", default="prompt", help="Column containing prompts"
+    )
     parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
     parser.add_argument("--max-samples", type=int, help="Limit number of samples")
-    parser.add_argument("--max-new-tokens", type=int, default=1024, help="Max tokens to generate")
+    parser.add_argument(
+        "--max-new-tokens", type=int, default=1024, help="Max tokens to generate"
+    )
     args = parser.parse_args()
 
     # Check GPU availability
     if not torch.cuda.is_available():
-        print("ERROR: GPU required. Use HF Jobs with --flavor a10g-small or run on GPU machine")
+        print(
+            "ERROR: GPU required. Use HF Jobs with --flavor a10g-small or run on GPU machine"
+        )
         sys.exit(1)
-
+
     print(f"GPU: {torch.cuda.get_device_name(0)}")
     print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")
 
     # Authenticate
     token = os.environ.get("HF_TOKEN") or get_token()
     if not token:
-        print("ERROR: HF_TOKEN required. Set HF_TOKEN env var or run: huggingface-cli login")
+        print(
+            "ERROR: HF_TOKEN required. Set HF_TOKEN env var or run: huggingface-cli login"
+        )
         sys.exit(1)
     login(token=token, add_to_git_credential=False)
 
     # Load tokenizer (following official blog)
     print(f"Loading tokenizer: {args.model_id}")
     tokenizer = AutoTokenizer.from_pretrained(args.model_id)
-
+
     # Load model (without Flash Attention 3 for compatibility)
     print(f"Loading model: {args.model_id}")
     print("Note: MXFP4 will auto-dequantize to bf16 on non-Hopper GPUs")
-
+
     model = AutoModelForCausalLM.from_pretrained(
         args.model_id,
         device_map="auto",
@@ -88,60 +103,57 @@ def main():
     # Load dataset
     print(f"Loading dataset: {args.input_dataset}")
     dataset = load_dataset(args.input_dataset, split="train")
-
+
     if args.prompt_column not in dataset.column_names:
         print(f"ERROR: Column '{args.prompt_column}' not found")
         print(f"Available columns: {dataset.column_names}")
         sys.exit(1)
-
+
     # Limit samples if requested
     if args.max_samples:
         dataset = dataset.select(range(min(args.max_samples, len(dataset))))
-
+
     print(f"Processing {len(dataset)} examples")
 
     # Process each example
     results = []
     for i, example in enumerate(dataset):
-        print(f"[{i+1}/{len(dataset)}] Processing...")
-
+        print(f"[{i + 1}/{len(dataset)}] Processing...")
+
         prompt_text = example[args.prompt_column]
-
+
         # Create messages (user message only, as per official examples)
-        messages = [
-            {"role": "user", "content": prompt_text}
-        ]
-
+        messages = [{"role": "user", "content": prompt_text}]
+
         # Apply chat template (following official blog)
         inputs = tokenizer.apply_chat_template(
-            messages,
-            add_generation_prompt=True,
-            return_tensors="pt",
-            return_dict=True
+            messages, add_generation_prompt=True, return_tensors="pt", return_dict=True
        ).to(model.device)
-
+
         # Generate
         with torch.no_grad():
             generated = model.generate(
-                **inputs,
+                **inputs,
                 max_new_tokens=args.max_new_tokens,
                 do_sample=True,
                 temperature=0.7,
             )
-
+
         # Decode only the generated part (excluding input)
         response = tokenizer.decode(
-            generated[0][inputs["input_ids"].shape[-1]:],
-            skip_special_tokens=False  # Keep channel markers
+            generated[0][inputs["input_ids"].shape[-1] :],
+            skip_special_tokens=False,  # Keep channel markers
         )
-
+
         # Store raw output with channel markers
-        results.append({
-            "prompt": prompt_text,
-            "raw_output": response,
-            "model": args.model_id
-        })
-
+        results.append(
+            {
+                "prompt": prompt_text,
+                "raw_output": response,
+                "model": args.model_id,
+            }
+        )
+
         # Show preview of output structure
         if i == 0:
             print(f"Sample output preview (first 200 chars):")
@@ -151,18 +163,20 @@ def main():
     # Create and push dataset
     print("\nCreating output dataset...")
     output_dataset = Dataset.from_list(results)
-
+
     print(f"Pushing to {args.output_dataset}...")
     output_dataset.push_to_hub(args.output_dataset, token=token)
-
+
     print(f"\n✅ Complete!")
     print(f"Dataset: https://huggingface.co/datasets/{args.output_dataset}")
     print(f"\nOutput format:")
     print("- prompt: Original prompt")
     print("- raw_output: Full model response with channel markers")
     print("- model: Model ID used")
-    print("\nTo extract final response, look for text after '<|channel|>final<|message|>'")
+    print(
+        "\nTo extract final response, look for text after '<|channel|>final<|message|>'"
+    )
 
 
 if __name__ == "__main__":
-    main()
+    main()
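For context on the first hunk: transformers 4.55.0 is the release that introduced gpt-oss support, which is presumably why the pin moves to it, and hf-xet provides Xet-backed Hub downloads (the >= 1.1.7 floor likely picks up fixes relevant to large model pulls, though the commit does not say). A convenience sketch, not part of the commit, for checking that an environment satisfies the new pins:

# Convenience sketch (not part of the commit): verify installed versions
# against the new dependency floors before launching the script.
from importlib.metadata import PackageNotFoundError, version

for pkg, floor in (("transformers", "4.55.0"), ("hf-xet", "1.1.7")):
    try:
        print(f"{pkg}: {version(pkg)} (need >= {floor})")
    except PackageNotFoundError:
        print(f"{pkg}: not installed (need >= {floor})")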
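Because the script decodes with skip_special_tokens=False, raw_output keeps the harmony channel markers, and its closing print names '<|channel|>final<|message|>' as the delimiter of the final answer. A minimal extraction sketch along those lines; the marker comes from the script itself, while the stop tokens '<|return|>' and '<|end|>' are assumptions about how harmony generations terminate, not something this commit specifies:

# Minimal sketch of the extraction hinted at by the script's final print.
# FINAL_MARKER is taken from the script's own output note; the stop tokens
# below are assumptions about generation endings and may need adjusting.
FINAL_MARKER = "<|channel|>final<|message|>"


def extract_final(raw_output: str) -> str:
    """Return text after the final-channel marker (whole string if absent)."""
    _, sep, tail = raw_output.partition(FINAL_MARKER)
    if not sep:
        # Marker missing, e.g. a truncated generation; return everything.
        return raw_output.strip()
    for stop in ("<|return|>", "<|end|>"):
        tail = tail.split(stop, 1)[0]
    return tail.strip()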
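Downstream, the pushed dataset can be read back and post-processed the same way; "user/gpt-oss-outputs" below is a placeholder repo id, not one from the commit, and extract_final is the sketch above:

# Reading back the pushed dataset; the column names match the script's
# documented output format (prompt, raw_output, model).
from datasets import load_dataset

ds = load_dataset("user/gpt-oss-outputs", split="train")
print(ds.column_names)  # expected: ['prompt', 'raw_output', 'model']
print(extract_final(ds[0]["raw_output"])[:200])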