diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index c362604f1..00c9f8bb2 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -140,7 +140,7 @@ dsr1-fp8-mi355x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-bf16-mi355x-sglang:
-  image: rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260215
+  image: lmsysorg/sglang-rocm:v0.5.9-rocm720-mi35x-20260325
   model: Qwen/Qwen3.5-397B-A17B
   model-prefix: qwen3.5
   runner: mi355x
@@ -228,7 +228,7 @@ qwen3.5-fp8-mi325x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-fp8-mi355x-sglang:
-  image: rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260218
+  image: lmsysorg/sglang-rocm:v0.5.9-rocm720-mi35x-20260325
   model: Qwen/Qwen3.5-397B-A17B-FP8
   model-prefix: qwen3.5
   runner: mi355x
diff --git a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh
index f77390707..fb5550d0f 100755
--- a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh
+++ b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh
@@ -19,6 +19,8 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -30,6 +32,13 @@ python3 -m sglang.launch_server \
     --port $PORT \
     --tensor-parallel-size $TP \
     --trust-remote-code \
+    --tokenizer-worker-num 6 \
+    --enable-aiter-allreduce-fusion \
+    --cuda-graph-max-bs $CONC \
+    --context-length $CONTEXT_LENGTH \
+    --disable-radix-cache \
+    --max-prefill-tokens $MAX_PREFILL_TOKENS \
+    --scheduler-recv-interval 30 \
     --mem-fraction-static 0.8 > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
diff --git a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
index f77390707..fb5550d0f 100644
--- a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
+++ b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
@@ -19,6 +19,8 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -30,6 +32,13 @@ python3 -m sglang.launch_server \
     --port $PORT \
     --tensor-parallel-size $TP \
     --trust-remote-code \
+    --tokenizer-worker-num 6 \
+    --enable-aiter-allreduce-fusion \
+    --cuda-graph-max-bs $CONC \
+    --context-length $CONTEXT_LENGTH \
+    --disable-radix-cache \
+    --max-prefill-tokens $MAX_PREFILL_TOKENS \
+    --scheduler-recv-interval 30 \
     --mem-fraction-static 0.8 > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 03fb6e082..7a6e789ac 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1067,4 +1067,11 @@
     - "dsr1-fp8-b200-sglang-mtp: v0.5.8-cu130-amd64 → v0.5.9-cu130"
     - "dsr1-fp8-h200-sglang: v0.5.9-cu129-amd64 → v0.5.9-cu130"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/943
+
+- config-keys:
+    - qwen3.5-fp8-mi355x-sglang
+    - qwen3.5-bf16-mi355x-sglang
+  description:
+    - "Update cli args of Qwen3.5 FP8 and BF16 SGLang benchmarks for MI355X to achieve better performance"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/942
 