diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index f4570fd2c..7ab473b47 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -1917,6 +1917,31 @@ kimik2.5-int4-h200-vllm:
       search-space:
         - { tp: 8, conc-start: 4, conc-end: 64 }
 
+minimaxm2.5-fp4-b200-vllm:
+  image: vllm/vllm-openai:v0.18.0
+  model: nvidia/MiniMax-M2.5-NVFP4
+  model-prefix: minimaxm2.5
+  runner: b200
+  precision: fp4
+  framework: vllm
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 4, ep: 4, conc-start: 4, conc-end: 64 }
+        - { tp: 2, ep: 2, conc-start: 4, conc-end: 64 }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 4, ep: 4, conc-start: 4, conc-end: 64 }
+        - { tp: 2, ep: 2, conc-start: 4, conc-end: 64 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 4, ep: 4, conc-start: 4, conc-end: 64 }
+        - { tp: 2, ep: 2, conc-start: 4, conc-end: 64 }
+
 kimik2.5-fp4-b200-vllm:
   image: vllm/vllm-openai:v0.17.0
   model: nvidia/Kimi-K2.5-NVFP4
diff --git a/benchmarks/single_node/minimaxm2.5_fp4_b200.sh b/benchmarks/single_node/minimaxm2.5_fp4_b200.sh
new file mode 100755
index 000000000..aa0c3868f
--- /dev/null
+++ b/benchmarks/single_node/minimaxm2.5_fp4_b200.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    CONC \
+    ISL \
+    OSL \
+    MAX_MODEL_LEN \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+hf download "$MODEL"
+
+nvidia-smi
+
+export TORCH_CUDA_ARCH_LIST="10.0"
+export PYTHONNOUSERSITE=1
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+# EP support: conditionally enable expert parallel based on EP_SIZE env var
+if [ "${EP_SIZE:-1}" -gt 1 ]; then
+    EP=" --enable-expert-parallel"
+else
+    EP=" "
+fi
+
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+vllm serve $MODEL --host 0.0.0.0 --port $PORT \
+--tensor-parallel-size=$TP \
+--gpu-memory-utilization 0.90 \
+--max-model-len $MAX_MODEL_LEN \
+--max-num-seqs $CONC \
+--no-enable-prefix-caching \
+--compilation_config.pass_config.fuse_allreduce_rms true \
+--trust-remote-code${EP} > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+pip install -q datasets pandas
+
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts $(( CONC * 10 )) \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/ \
+    --trust-remote-code
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 2648e746e..acc9fb634 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1,3 +1,11 @@
+- config-keys:
+    - minimaxm2.5-fp4-b200-vllm
+  description:
+    - "Add MiniMax M2.5 NVFP4 single-node B200 vLLM benchmark (TP4, TP2)"
+    - "Uses vllm/vllm-openai:v0.18.0 image with --no-enable-prefix-caching"
+    - "Concurrency 4-64, all three seq-len configs (1k1k, 1k8k, 8k1k)"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXX
+
 - config-keys:
     - dsr1-fp8-b200-dynamo-trt
     - dsr1-fp8-h200-dynamo-trt
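Note on running the new script outside CI: benchmarks/single_node/minimaxm2.5_fp4_b200.sh takes all of its parameters from environment variables validated by check_env_vars. The sketch below shows one plausible manual invocation for the 1k/1k, TP4/EP4 search point; the MAX_MODEL_LEN, RANDOM_RANGE_RATIO, and RESULT_FILENAME values are illustrative assumptions, not values taken from this config.

    # Hypothetical manual run of the new single-node benchmark; assumed values are marked.
    export MODEL=nvidia/MiniMax-M2.5-NVFP4
    export TP=4 EP_SIZE=4 CONC=64 ISL=1024 OSL=1024
    export MAX_MODEL_LEN=4096                           # assumed: must cover ISL + OSL
    export RANDOM_RANGE_RATIO=0.8                       # assumed
    export RESULT_FILENAME=minimaxm2.5_tp4_1k1k.json    # assumed
    export RUN_EVAL=false
    bash benchmarks/single_node/minimaxm2.5_fp4_b200.sh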