davanstrien (HF Staff) committed
Commit 7fe1197 · verified · 1 Parent(s): 9137048

Upload gpt_oss_vllm_harmony.py

Files changed (1):
  1. gpt_oss_vllm_harmony.py +608 -0
gpt_oss_vllm_harmony.py ADDED
@@ -0,0 +1,608 @@
+ # /// script
+ # requires-python = ">=3.12,<3.13" # Required for vllm==0.10.1+gptoss
+ # dependencies = [
+ #     "datasets",
+ #     "huggingface-hub[hf_transfer]",
+ #     "torch",
+ #     "openai-harmony",  # Official OpenAI harmony library
+ #     "vllm==0.10.1+gptoss",  # Specific version for GPT OSS models
+ #     "tqdm",
+ # ]
+ #
+ # [[tool.uv.index]]
+ # url = "https://wheels.vllm.ai/gpt-oss/"
+ #
+ # [[tool.uv.index]]
+ # url = "https://download.pytorch.org/whl/nightly/cu128"
+ #
+ # [tool.uv]
+ # index-strategy = "unsafe-best-match"
+ # ///
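+ # Note: index-strategy = "unsafe-best-match" lets uv pick each package's best
+ # version across all of the indexes above, which is what allows mixing the
+ # GPT-OSS vLLM wheel index and the PyTorch nightly index with PyPI.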
+ """
+ Generate responses with transparent reasoning using OpenAI GPT OSS models with harmony format.
+ 
+ This script uses the official openai_harmony library for proper message formatting
+ and channel parsing, as recommended in the OpenAI cookbook.
+ 
+ Example usage:
+     # Generate haiku with reasoning
+     uv run gpt_oss_vllm_harmony.py \\
+         --input-dataset davanstrien/haiku_dpo \\
+         --output-dataset username/haiku-reasoning \\
+         --prompt-column question
+ 
+     # Any prompt dataset with custom settings
+     uv run gpt_oss_vllm_harmony.py \\
+         --input-dataset username/prompts \\
+         --output-dataset username/responses-with-reasoning \\
+         --prompt-column prompt \\
+         --reasoning-level high \\
+         --max-samples 100
+ 
+     # HF Jobs execution
+     hf jobs uv run --flavor a10g-small \\
+         https://huggingface.co/datasets/uv-scripts/openai-reasoning/raw/main/gpt_oss_vllm_harmony.py \\
+         --input-dataset username/prompts \\
+         --output-dataset username/responses-with-reasoning
+ """
+ 
+ import argparse
+ import json
+ import logging
+ import os
+ import sys
+ import time
+ from datetime import datetime
+ from typing import Dict, List, Optional
+ 
+ from datasets import Dataset, load_dataset
+ from huggingface_hub import DatasetCard, get_token, login
+ from openai_harmony import (
+     HarmonyEncodingName,
+     load_harmony_encoding,
+     Conversation,
+     Message,
+     Role,
+     SystemContent,
+     DeveloperContent,
+ )
+ from torch import cuda
+ from tqdm.auto import tqdm
+ from vllm import LLM, SamplingParams
+ 
+ # Enable HF Transfer for faster downloads
+ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+ 
+ # TODO: Change logging level back to INFO after initial testing
+ logging.basicConfig(
+     level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"
+ )
+ logger = logging.getLogger(__name__)
+ 
+ 
+ def check_gpu_availability() -> int:
+     """Check if CUDA is available and return the number of GPUs."""
+     if not cuda.is_available():
+         logger.error("CUDA is not available. This script requires a GPU.")
+         logger.error(
+             "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
+         )
+         sys.exit(1)
+ 
+     num_gpus = cuda.device_count()
+     for i in range(num_gpus):
+         gpu_name = cuda.get_device_name(i)
+         gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
+         logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")
+ 
+     return num_gpus
+ 
+ 
+ def parse_harmony_messages(entries: List, prompt: str) -> Dict[str, str]:
+     """
+     Parse harmony message entries into think/content structure.
+ 
+     The harmony format produces structured messages with different channels:
+     - analysis: Chain-of-thought reasoning
+     - final: User-facing response
+     - commentary: Tool calls (if any)
+     """
+     think = ""
+     content = ""
+ 
+     # Log what we received for debugging
+     logger.debug(f"[VERBOSE] Parsing {len(entries)} harmony entries")
+ 
+     for i, entry in enumerate(entries):
+         entry_dict = entry.to_dict()
+         logger.debug(f"[VERBOSE] Entry {i}: {json.dumps(entry_dict, indent=2)}")
+ 
+         # Extract content based on the message structure
+         if "content" in entry_dict:
+             if isinstance(entry_dict["content"], list):
+                 for content_item in entry_dict["content"]:
+                     if content_item.get("type") == "text":
+                         text = content_item.get("text", "")
+                         # Determine channel based on content or metadata.
+                         # This is a simplified approach - adjust based on actual harmony output.
+                         if "analysis" in str(entry_dict).lower() or i == 0:
+                             think += text + "\n"
+                         else:
+                             content += text + "\n"
+             elif isinstance(entry_dict["content"], str):
+                 # Simple string content
+                 if i == 0:  # First message is often reasoning
+                     think = entry_dict["content"]
+                 else:
+                     content = entry_dict["content"]
+ 
+     # Clean up whitespace
+     think = think.strip()
+     content = content.strip()
+ 
+     # If we didn't parse anything, use the first entry as content
+     if not think and not content and entries:
+         content = str(entries[0].to_dict())
+ 
+     return {
+         "prompt": prompt,
+         "think": think,
+         "content": content,
+         "raw_output": json.dumps([e.to_dict() for e in entries], indent=2),
+     }
+ 
+ 
+ def create_dataset_card(
+     input_dataset: str,
+     model_id: str,
+     prompt_column: str,
+     reasoning_level: str,
+     num_examples: int,
+     generation_time: str,
+     tensor_parallel_size: int,
+     temperature: float,
+     max_tokens: int,
+ ) -> str:
+     """Create a dataset card documenting the generation process."""
+     return f"""---
+ tags:
+ - generated
+ - synthetic
+ - reasoning
+ - openai-gpt-oss
+ - harmony-format
+ ---
+ 
+ # Generated Responses with Reasoning (Harmony Format)
+ 
+ This dataset contains AI-generated responses with transparent chain-of-thought reasoning using OpenAI GPT OSS models and the official harmony format.
+ 
+ ## Generation Details
+ 
+ - **Source Dataset**: [{input_dataset}](https://huggingface.co/datasets/{input_dataset})
+ - **Model**: [{model_id}](https://huggingface.co/{model_id})
+ - **Reasoning Level**: {reasoning_level}
+ - **Number of Examples**: {num_examples:,}
+ - **Generation Date**: {generation_time}
+ - **Format**: Official OpenAI Harmony format
+ 
+ ## Dataset Structure
+ 
+ Each example contains:
+ - `prompt`: The input prompt from the source dataset
+ - `think`: The model's internal reasoning process (analysis channel)
+ - `content`: The final response (final channel)
+ - `raw_output`: Complete harmony format output
+ - `reasoning_level`: The reasoning effort level used
+ - `model`: Model identifier
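+ 
+ An example record (field values are illustrative):
+ 
+ ```json
+ {{
+   "prompt": "Write a haiku about autumn",
+   "think": "The user wants a haiku about autumn...",
+   "content": "Crisp leaves drift down...",
+   "reasoning_level": "{reasoning_level}",
+   "model": "{model_id}"
+ }}
+ ```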
+ 
+ ## Generation Script
+ 
+ Generated using [uv-scripts/openai-reasoning](https://huggingface.co/datasets/uv-scripts/openai-reasoning) with official harmony format.
+ 
+ To reproduce:
+ ```bash
+ uv run gpt_oss_vllm_harmony.py \\
+     --input-dataset {input_dataset} \\
+     --output-dataset <your-dataset> \\
+     --prompt-column {prompt_column} \\
+     --model-id {model_id} \\
+     --reasoning-level {reasoning_level}
+ ```
+ """
+ 
+ 
+ def main(
+     input_dataset: str,
+     output_dataset_hub_id: str,
+     prompt_column: str = "prompt",
+     model_id: str = "openai/gpt-oss-20b",
+     reasoning_level: str = "high",
+     max_samples: Optional[int] = None,
+     temperature: float = 0.7,
+     max_tokens: int = 512,
+     gpu_memory_utilization: float = 0.90,
+     tensor_parallel_size: Optional[int] = None,
+     hf_token: Optional[str] = None,
+ ):
+     """
+     Main generation pipeline using official harmony format.
+ 
+     Args:
+         input_dataset: Source dataset on Hugging Face Hub
+         output_dataset_hub_id: Where to save results on Hugging Face Hub
+         prompt_column: Column containing the prompts
+         model_id: OpenAI GPT OSS model to use
+         reasoning_level: Reasoning effort level (high/medium/low)
+         max_samples: Maximum number of samples to process
+         temperature: Sampling temperature
+         max_tokens: Maximum tokens to generate
+         gpu_memory_utilization: GPU memory utilization factor
+         tensor_parallel_size: Number of GPUs to use (auto-detect if None)
+         hf_token: Hugging Face authentication token
+     """
+     generation_start_time = datetime.now().isoformat()
+ 
+     # GPU check and configuration
+     num_gpus = check_gpu_availability()
+     if tensor_parallel_size is None:
+         tensor_parallel_size = num_gpus
+         logger.info(
+             f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
+         )
+ 
+     # Authentication
+     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()
+ 
+     if not HF_TOKEN:
+         logger.error("No HuggingFace token found. Please provide token via:")
+         logger.error("  1. --hf-token argument")
+         logger.error("  2. HF_TOKEN environment variable")
+         logger.error("  3. Run 'huggingface-cli login'")
+         sys.exit(1)
+ 
+     logger.info("HuggingFace token found, authenticating...")
+     login(token=HF_TOKEN)
+ 
+     # Initialize harmony encoding
+     logger.info("Loading harmony encoding...")
+     encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+ 
+     # Get stop tokens from harmony
+     stop_token_ids = encoding.stop_tokens_for_assistant_action()
+     logger.info(f"[VERBOSE] Harmony stop token IDs: {stop_token_ids}")
+ 
+     # Initialize vLLM
+     logger.info(f"Loading model: {model_id}")
+     logger.info("Note: vLLM will handle batching automatically for optimal throughput")
+     try:
+         llm = LLM(
+             model=model_id,
+             tensor_parallel_size=tensor_parallel_size,
+             gpu_memory_utilization=gpu_memory_utilization,
+             trust_remote_code=True,
+             dtype="bfloat16",
+         )
+         logger.info("[VERBOSE] Model loaded successfully")
+     except Exception as e:
+         logger.error(f"Failed to load model with vLLM: {e}")
+         if "mxfp4" in str(e).lower():
+             logger.error("This appears to be a quantization format issue.")
+             logger.error("The model uses mxfp4 quantization which requires specific support.")
+         sys.exit(1)
+ 
+     # Create sampling parameters
+     sampling_params = SamplingParams(
+         temperature=temperature,
+         max_tokens=max_tokens,
+         stop_token_ids=stop_token_ids,
+     )
+     logger.info(f"[VERBOSE] Sampling params: temp={temperature}, max_tokens={max_tokens}")
+ 
+     # Load dataset
+     logger.info(f"Loading dataset: {input_dataset}")
+     dataset = load_dataset(input_dataset, split="train")
+ 
+     # Validate prompt column
+     if prompt_column not in dataset.column_names:
+         logger.error(
+             f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
+         )
+         sys.exit(1)
+ 
+     # Limit samples if requested
+     if max_samples:
+         dataset = dataset.select(range(min(max_samples, len(dataset))))
+     total_examples = len(dataset)
+     logger.info(f"Processing {total_examples:,} examples")
+ 
+     # Prepare prompts using harmony format
+     logger.info(f"Preparing prompts with harmony format and reasoning_level={reasoning_level}...")
+     prefill_ids_list = []
+     prompts = []
+ 
+     for i, example in enumerate(tqdm(dataset, desc="Preparing prompts")):
+         prompt_text = example[prompt_column]
+         prompts.append(prompt_text)
+ 
+         # Create harmony conversation.
+         # Inject reasoning level into the developer message.
+         developer_content = DeveloperContent.new()
+         if reasoning_level:
+             developer_content = developer_content.with_instructions(
+                 f"Reasoning: {reasoning_level}"
+             )
+ 
+         convo = Conversation.from_messages([
+             Message.from_role_and_content(Role.SYSTEM, SystemContent.new()),
+             Message.from_role_and_content(Role.DEVELOPER, developer_content),
+             Message.from_role_and_content(Role.USER, prompt_text),
+         ])
+ 
+         # Render to token IDs. Rendering for completion also appends the
+         # assistant turn header, so the model generates as the assistant.
+         prefill_ids = encoding.render_conversation_for_completion(convo, Role.ASSISTANT)
+         prefill_ids_list.append(prefill_ids)
+ 
+         # Log first few examples
+         if i < 10:
+             logger.info(f"[VERBOSE] Example {i} original text: {prompt_text[:200]}...")
+             logger.info(f"[VERBOSE] Example {i} prefill length: {len(prefill_ids)} tokens")
+ 
+     # Generate responses with vLLM
+     logger.info(f"Starting generation for {len(prefill_ids_list):,} prompts...")
+     logger.info("[VERBOSE] Using prompt_token_ids for generation")
+ 
+     start_time = time.time()
+     outputs = llm.generate(
+         prompt_token_ids=prefill_ids_list,
+         sampling_params=sampling_params,
+     )
+     end_time = time.time()
+ 
+     generation_time = end_time - start_time
+     logger.info("\n[VERBOSE] Generation Performance Metrics:")
+     logger.info(f"[VERBOSE] - Total time: {generation_time:.2f} seconds")
+     logger.info(f"[VERBOSE] - Throughput: {len(outputs) / generation_time:.2f} prompts/second")
+     logger.info(f"[VERBOSE] - Average time per prompt: {generation_time / len(outputs):.2f} seconds")
+ 
+     # Parse outputs using harmony format
+     logger.info("Parsing generated outputs with harmony format...")
+     results = []
+ 
+     # Track statistics
+     parse_stats = {"success": 0, "empty": 0, "error": 0}
+ 
+     for i, output in enumerate(tqdm(outputs, desc="Parsing outputs")):
+         gen = output.outputs[0]
+         text = gen.text
+         output_tokens = gen.token_ids
+ 
+         logger.debug(f"[VERBOSE] Output {i}: {len(output_tokens)} tokens, {len(text)} chars")
+ 
+         try:
+             # Parse with harmony
+             entries = encoding.parse_messages_from_completion_tokens(output_tokens, Role.ASSISTANT)
+ 
+             # Convert to our format
+             parsed = parse_harmony_messages(entries, prompts[i])
+ 
+             if parsed["think"] or parsed["content"]:
+                 parse_stats["success"] += 1
+             else:
+                 parse_stats["empty"] += 1
+ 
+             # Verbose logging for first 10 examples
+             if i < 10:
+                 logger.info(f"\n[VERBOSE] ========== Example {i} Output ==========")
+                 logger.info(f"[VERBOSE] Original prompt: {prompts[i][:200]}...")
+                 logger.info(f"[VERBOSE] Raw text output: {text}")
+                 logger.info(f"[VERBOSE] Harmony entries: {len(entries)}")
+                 for j, entry in enumerate(entries):
+                     logger.info(f"[VERBOSE] Entry {j}: {json.dumps(entry.to_dict(), indent=2)}")
+                 logger.info(f"[VERBOSE] Parsed think ({len(parsed['think'])} chars): {parsed['think'][:500]}...")
+                 logger.info(f"[VERBOSE] Parsed content ({len(parsed['content'])} chars): {parsed['content'][:500]}...")
+                 logger.info("[VERBOSE] ====================================\n")
+ 
+         except Exception as e:
+             logger.error(f"[VERBOSE] Error parsing output {i}: {e}")
+             parse_stats["error"] += 1
+             # Fallback: use raw text
+             parsed = {
+                 "prompt": prompts[i],
+                 "think": "",
+                 "content": text,
+                 "raw_output": text,
+             }
+ 
+         result = {
+             "prompt": parsed["prompt"],
+             "think": parsed["think"],
+             "content": parsed["content"],
+             "raw_output": parsed["raw_output"],
+             "reasoning_level": reasoning_level,
+             "model": model_id,
+         }
+         results.append(result)
+ 
+     # Log parsing statistics
+     logger.info("\n[VERBOSE] Parsing Statistics:")
+     logger.info(f"[VERBOSE] - Successfully parsed: {parse_stats['success']} ({parse_stats['success'] / len(outputs) * 100:.1f}%)")
+     logger.info(f"[VERBOSE] - Empty results: {parse_stats['empty']} ({parse_stats['empty'] / len(outputs) * 100:.1f}%)")
+     logger.info(f"[VERBOSE] - Parse errors: {parse_stats['error']} ({parse_stats['error'] / len(outputs) * 100:.1f}%)")
+ 
+     # Create dataset
+     logger.info("Creating output dataset...")
+     output_dataset = Dataset.from_list(results)
+ 
+     # Create dataset card
+     logger.info("Creating dataset card...")
+     card_content = create_dataset_card(
+         input_dataset=input_dataset,
+         model_id=model_id,
+         prompt_column=prompt_column,
+         reasoning_level=reasoning_level,
+         num_examples=total_examples,
+         generation_time=generation_start_time,
+         tensor_parallel_size=tensor_parallel_size,
+         temperature=temperature,
+         max_tokens=max_tokens,
+     )
+ 
+     # Push to hub
+     logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
+     output_dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)
+ 
+     # Push dataset card
+     card = DatasetCard(card_content)
+     card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)
+ 
+     logger.info("✅ Generation complete!")
+     logger.info(
+         f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
+     )
+ 
+     # Final summary
+     logger.info("\n[VERBOSE] ========== FINAL SUMMARY ==========")
+     logger.info(f"[VERBOSE] Model: {model_id}")
+     logger.info(f"[VERBOSE] Reasoning level: {reasoning_level}")
+     logger.info(f"[VERBOSE] Examples processed: {total_examples}")
+     logger.info(f"[VERBOSE] Temperature: {temperature}")
+     logger.info(f"[VERBOSE] Max tokens: {max_tokens}")
+     logger.info(f"[VERBOSE] GPU config: {tensor_parallel_size} GPU(s)")
+     logger.info("[VERBOSE] ====================================")
+ 
+ 
+ if __name__ == "__main__":
+     if len(sys.argv) > 1:
+         parser = argparse.ArgumentParser(
+             description="Generate responses with reasoning using OpenAI GPT OSS models (Harmony format)",
+             formatter_class=argparse.RawDescriptionHelpFormatter,
+             epilog="""
+ Examples:
+     # Generate haiku with reasoning
+     uv run gpt_oss_vllm_harmony.py \\
+         --input-dataset davanstrien/haiku_dpo \\
+         --output-dataset username/haiku-reasoning \\
+         --prompt-column question
+ 
+     # Any prompt dataset
+     uv run gpt_oss_vllm_harmony.py \\
+         --input-dataset username/prompts \\
+         --output-dataset username/responses-reasoning \\
+         --reasoning-level high \\
+         --max-samples 100
+ 
+     # Use larger 120B model (requires 4x L40S GPUs)
+     uv run gpt_oss_vllm_harmony.py \\
+         --input-dataset username/prompts \\
+         --output-dataset username/responses-reasoning \\
+         --model-id openai/gpt-oss-120b \\
+         --tensor-parallel-size 4
+ """,
+         )
+ 
+         parser.add_argument(
+             "--input-dataset",
+             type=str,
+             required=True,
+             help="Input dataset on Hugging Face Hub",
+         )
+         parser.add_argument(
+             "--output-dataset",
+             type=str,
+             required=True,
+             help="Output dataset name on Hugging Face Hub",
+         )
+         parser.add_argument(
+             "--prompt-column",
+             type=str,
+             default="prompt",
+             help="Column containing prompts (default: prompt)",
+         )
+         parser.add_argument(
+             "--model-id",
+             type=str,
+             default="openai/gpt-oss-20b",
+             help="Model to use (default: openai/gpt-oss-20b)",
+         )
+         parser.add_argument(
+             "--reasoning-level",
+             type=str,
+             choices=["high", "medium", "low"],
+             default="high",
+             help="Reasoning effort level (default: high)",
+         )
+         parser.add_argument(
+             "--max-samples", type=int, help="Maximum number of samples to process"
+         )
+         parser.add_argument(
+             "--temperature",
+             type=float,
+             default=0.7,
+             help="Sampling temperature (default: 0.7)",
+         )
+         parser.add_argument(
+             "--max-tokens",
+             type=int,
+             default=512,
+             help="Maximum tokens to generate (default: 512)",
+         )
+         parser.add_argument(
+             "--gpu-memory-utilization",
+             type=float,
+             default=0.90,
+             help="GPU memory utilization (default: 0.90)",
+         )
+         parser.add_argument(
+             "--tensor-parallel-size",
+             type=int,
+             help="Number of GPUs to use (default: auto-detect)",
+         )
+         parser.add_argument(
+             "--hf-token",
+             type=str,
+             help="Hugging Face token (can also use HF_TOKEN env var)",
+         )
+ 
+         args = parser.parse_args()
+ 
+         main(
+             input_dataset=args.input_dataset,
+             output_dataset_hub_id=args.output_dataset,
+             prompt_column=args.prompt_column,
+             model_id=args.model_id,
+             reasoning_level=args.reasoning_level,
+             max_samples=args.max_samples,
+             temperature=args.temperature,
+             max_tokens=args.max_tokens,
+             gpu_memory_utilization=args.gpu_memory_utilization,
+             tensor_parallel_size=args.tensor_parallel_size,
+             hf_token=args.hf_token,
+         )
+     else:
+         # Show HF Jobs example when run without arguments
+         print("""
+ OpenAI GPT OSS Reasoning Generation Script (Harmony Format)
+ ==========================================================
+ 
+ This script requires arguments. For usage information:
+     uv run gpt_oss_vllm_harmony.py --help
+ 
+ Example HF Jobs command for the 20B model (requires ~40GB GPU memory):
+     hf jobs uv run \\
+         --flavor a10g-large \\
+         https://huggingface.co/datasets/uv-scripts/openai-reasoning/raw/main/gpt_oss_vllm_harmony.py \\
+         --input-dataset davanstrien/haiku_dpo \\
+         --output-dataset username/haiku-reasoning \\
+         --prompt-column question \\
+         --reasoning-level high
+ 
+ Example HF Jobs command for the 120B model (requires ~240GB GPU memory):
+     hf jobs uv run \\
+         --flavor l40s-4x \\
+         https://huggingface.co/datasets/uv-scripts/openai-reasoning/raw/main/gpt_oss_vllm_harmony.py \\
+         --input-dataset username/prompts \\
+         --output-dataset username/responses-reasoning \\
+         --model-id openai/gpt-oss-120b \\
+         --reasoning-level high
+ """)