
Commit a41f882

refactor mode -> scenario
1 parent 63592a3 commit a41f882

File tree

  language/gpt-oss/README.md
  language/gpt-oss/run_mlperf.py

2 files changed: +13 −13

language/gpt-oss/README.md

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@ Then, run a benchmark script that uses the client to send/recv requests.
 ### Run the inference
 ```bash
 python3 run_mlperf.py --help
-usage: run_mlperf.py [-h] [--mode {offline,server}] --input-file INPUT_FILE [--max-samples MAX_SAMPLES] [--mlperf-conf MLPERF_CONF]
+usage: run_mlperf.py [-h] [--scenario {offline,server}] --input-file INPUT_FILE [--max-samples MAX_SAMPLES] [--mlperf-conf MLPERF_CONF]
                      [--user-conf USER_CONF] [--accuracy] [--output-dir OUTPUT_DIR] [--backend {sglang}] [--server-url SERVER_URL]
                      [--generation-config GENERATION_CONFIG] [--max-new-tokens MAX_NEW_TOKENS] [--num-workers NUM_WORKERS]
                      [--max-concurrency MAX_CONCURRENCY]
@@ -48,7 +48,7 @@ Run MLPerf inference benchmarks for gpt-oss
 
 options:
   -h, --help            show this help message and exit
-  --mode {offline,server}
+  --scenario {offline,server}
                         MLPerf scenario mode
   --input-file INPUT_FILE
                         Path to tokenized dataset (pickle file)

language/gpt-oss/run_mlperf.py

Lines changed: 11 additions & 11 deletions
@@ -6,13 +6,13 @@
 
 Usage:
     # Offline scenario (performance)
-    python run_mlperf.py --mode offline --input-file data/accuracy_eval_tokenized.pkl
+    python run_mlperf.py --scenario offline --input-file data/accuracy_eval_tokenized.pkl
 
     # Server scenario (performance)
-    python run_mlperf.py --mode server --input-file data/accuracy_eval_tokenized.pkl
+    python run_mlperf.py --scenario server --input-file data/accuracy_eval_tokenized.pkl
 
     # Accuracy mode
-    python run_mlperf.py --mode offline --accuracy --input-file data/accuracy_eval_tokenized.pkl
+    python run_mlperf.py --scenario offline --accuracy --input-file data/accuracy_eval_tokenized.pkl
 """
 
 import argparse
@@ -71,11 +71,11 @@ def create_argument_parser() -> argparse.ArgumentParser:
 
     # Scenario selection
     parser.add_argument(
-        "--mode",
+        "--scenario",
         type=str,
         default="offline",
         choices=["offline", "server"],
-        help="MLPerf scenario mode"
+        help="MLPerf scenario (offline or server)"
     )
 
     # Dataset
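
Note: the rename is a breaking change for anyone still invoking the script with `--mode`. If a transition period were wanted, argparse could register the old spelling as a legacy alias of `--scenario`. The sketch below is only one possible follow-up under that assumption; the `--mode` alias and the deprecation warning are not part of this commit.

```python
import argparse
import warnings


class _DeprecatedAlias(argparse.Action):
    """Store the value as usual, but warn when the legacy spelling is used."""

    def __call__(self, parser, namespace, values, option_string=None):
        if option_string == "--mode":
            warnings.warn("--mode is deprecated; use --scenario instead",
                          DeprecationWarning, stacklevel=2)
        setattr(namespace, self.dest, values)


parser = argparse.ArgumentParser(
    description="Run MLPerf inference benchmarks for gpt-oss")
# Both spellings write to args.scenario; "--mode" is kept only as a legacy alias.
parser.add_argument(
    "--scenario", "--mode",
    dest="scenario",
    action=_DeprecatedAlias,
    type=str,
    default="offline",
    choices=["offline", "server"],
    help="MLPerf scenario (offline or server)",
)

args = parser.parse_args(["--mode", "server"])  # triggers the deprecation warning
assert args.scenario == "server"
```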
@@ -296,15 +296,15 @@ def do_cleanup():
     try:
         # Create output directories
        output_dir = Path(args.output_dir)
-        log_dir = output_dir / args.mode / \
+        log_dir = output_dir / args.scenario / \
             ("accuracy" if args.accuracy else "performance")
         log_dir.mkdir(parents=True, exist_ok=True)
 
         logger.info("=" * 80)
         logger.info("MLPerf Inference Benchmark Runner for GPT-OSS")
         logger.info("=" * 80)
         logger.info(f"Backend: {args.backend}")
-        logger.info(f"Mode: {args.mode}")
+        logger.info(f"Scenario: {args.scenario}")
         logger.info(f"Accuracy: {args.accuracy}")
         logger.info(f"Input file: {args.input_file}")
         logger.info(f"Output directory: {log_dir}")
@@ -370,7 +370,7 @@ def do_cleanup():
         # - Server: Incremented as queries arrive
         pbar = tqdm(
             total=0,  # Will be updated dynamically by SUT
-            desc=f"MLPerf {args.mode}",
+            desc=f"MLPerf {args.scenario}",
             unit="query",
             leave=True,
             position=0,
@@ -381,8 +381,8 @@ def do_cleanup():
         )
 
         # Create SUT with progress bar
-        logger.debug(f"Creating {args.mode} SUT...")
-        if args.mode == "offline":
+        logger.debug(f"Creating {args.scenario} SUT...")
+        if args.scenario == "offline":
             sut = OfflineSUT(
                 backend=backend,
                 dataset=prompts,
@@ -419,7 +419,7 @@ def do_cleanup():
 
         # Configure LoadGen
         settings = configure_loadgen(
-            scenario=args.mode,
+            scenario=args.scenario,
             accuracy_mode=args.accuracy,
             mlperf_conf=args.mlperf_conf,
             user_conf=args.user_conf,
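
The rename also lines the CLI up with LoadGen's own vocabulary, where "scenario" (Offline/Server traffic pattern) and "mode" (accuracy vs. performance run) are two separate settings. Below is a minimal sketch of how a helper like `configure_loadgen` might map the flags onto the standard `mlperf_loadgen` Python bindings; the repository's actual implementation is not shown in this diff, and its `mlperf.conf`/`user.conf` handling is omitted here, so treat the signature and body as illustrative only.

```python
import mlperf_loadgen as lg


def configure_loadgen(scenario: str, accuracy_mode: bool) -> lg.TestSettings:
    """Illustrative only: map the CLI flags onto LoadGen's two distinct knobs."""
    settings = lg.TestSettings()

    # LoadGen "scenario" = the traffic pattern (what --scenario now selects).
    settings.scenario = (
        lg.TestScenario.Offline if scenario == "offline" else lg.TestScenario.Server
    )

    # LoadGen "mode" = accuracy vs. performance run (what --accuracy selects).
    settings.mode = (
        lg.TestMode.AccuracyOnly if accuracy_mode else lg.TestMode.PerformanceOnly
    )
    return settings
```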
