
Conversation

@elvircrn (Contributor) commented Aug 19, 2025

Purpose

vectorize_with_alignment generated scalar load instructions even where vectorization was possible.

This PR fixes that, nudging the compiler to actually produce vectorized loads.
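
At its core the change is a two-line difference in the vectorized loop; a condensed before/after sketch, using the names from the demo source further down:

// Before: hand vec_op a reference straight into global memory.
// nvcc often lowers the element accesses to four scalar ld.f32 (float, VEC_SIZE = 4).
vout_t tmp;
vec_op(tmp, v_in[i]);
v_out[i] = tmp;

// After: materialize the pack in a register-resident local first.
// This typically becomes a single vectorized ld/st pair (ld.v4.f32 / st.v4.f32 in the PTX below).
vout_t tmp;
vin_t src = v_in[i];
vec_op(tmp, src);
v_out[i] = tmp;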

I ran vllm bench throughput --model nm-testing/DeepSeek-Coder-V2-Lite-Instruct-FP8 --input-len 1000 --output-len 100 --trust_remote_code --enforce_eager

Proposed changes:

Throughput: 48.24 requests/s, 52982.55 total tokens/s, 4824.34 output tokens/s
Total num prompt tokens:  998235
Total num output tokens:  100000

Base branch:

Throughput: 47.74 requests/s, 52425.60 total tokens/s, 4773.62 output tokens/s
Total num prompt tokens:  998235
Total num output tokens:  100000

Accuracy tests via lm_eval --model vllm --model_args "pretrained=nm-testing/DeepSeek-Coder-V2-Lite-Instruct-FP8,max_model_len=32768,enable_expert_parallel=True,enforce_eager=True" --trust_remote_code --tasks gsm8k --num_fewshot 5 --batch_size auto

This branch:

|Tasks|Version|     Filter     |n-shot|  Metric   |   |Value |   |Stderr|
|-----|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|gsm8k|      3|flexible-extract|     5|exact_match|↑  |0.7566|±  |0.0118|
|     |       |strict-match    |     5|exact_match|↑  |0.7384|±  |0.0121|

Base branch:

|Tasks|Version|     Filter     |n-shot|  Metric   |   |Value |   |Stderr|
|-----|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|gsm8k|      3|flexible-extract|     5|exact_match|↑  |0.7566|±  |0.0118|
|     |       |strict-match    |     5|exact_match|↑  |0.7384|±  |0.0121|

====

Given the following source:

// nvcc -O3 -arch=sm_90 -ptx

// ===== vectorization.cuh =====
#include <stdint.h>

namespace vllm {

// Vectorization container: tightly packed, aligned to total byte width.
template <typename scalar_t, int vec_size>
struct alignas(sizeof(scalar_t) * vec_size) vec_n_t {
  scalar_t val[vec_size];
};

// A minimal device/host min to avoid pulling in <algorithm>.
template <typename T>
__host__ __device__ inline T my_min(T a, T b) { return (a < b) ? a : b; }

template <int VEC_SIZE, typename InT, typename OutT, typename ScaOp>
struct DefaultVecOp {
  ScaOp scalar_op;

  __device__ __forceinline__ void operator()(
      vec_n_t<OutT, VEC_SIZE>& dst, const vec_n_t<InT, VEC_SIZE>& src) const {
#pragma unroll
    for (int i = 0; i < VEC_SIZE; ++i) {
      scalar_op(dst.val[i], src.val[i]);
    }
  }
};

// ORIGINAL version: passes v_in[i] directly into vec_op.
// This often leads the compiler to emit scalar element loads/stores.
template <int VEC_SIZE, typename InT, typename OutT, typename VecOp,
          typename ScaOp>
__device__ inline void vectorize_with_alignment(
    const InT* __restrict__ in, OutT* __restrict__ out,
    int len, int tid, int stride,
    VecOp&& vec_op,       // vec_n_t<InT,V> -> vec_n_t<OutT,V>
    ScaOp&& scalar_op) {  // InT -> OutT
  static_assert(VEC_SIZE > 0 && (VEC_SIZE & (VEC_SIZE - 1)) == 0,
                "VEC_SIZE must be a positive power-of-two");
  constexpr int WIDTH = VEC_SIZE * sizeof(InT);
  uintptr_t addr = reinterpret_cast<uintptr_t>(in);

  bool can_vec = ((addr & (WIDTH - 1)) == 0) && ((len & (VEC_SIZE - 1)) == 0);
  if (can_vec) {
    int num_vec = len / VEC_SIZE;
    using vin_t  = vec_n_t<InT,  VEC_SIZE>;
    using vout_t = vec_n_t<OutT, VEC_SIZE>;
    const vin_t* __restrict__ v_in  = reinterpret_cast<const vin_t*>(in);
    vout_t*       __restrict__ v_out = reinterpret_cast<vout_t*>(out);

    for (int i = tid; i < num_vec; i += stride) {
      vout_t tmp;
      // Pass a reference to a global pack directly.
      vec_op(tmp, v_in[i]);
      v_out[i] = tmp;
    }
    return;
  }

  int misalignment_offset = addr & (WIDTH - 1);
  int alignment_bytes = WIDTH - misalignment_offset;
  int prefix_elems = alignment_bytes & (WIDTH - 1);
  prefix_elems /= sizeof(InT);
  prefix_elems = my_min(prefix_elems, len);

  // (Scalar prefix)
  for (int i = tid; i < prefix_elems; i += stride) {
    scalar_op(out[i], in[i]);
  }

  in  += prefix_elems;
  out += prefix_elems;
  len -= prefix_elems;

  int num_vec = len / VEC_SIZE;
  using vin_t2  = vec_n_t<InT,  VEC_SIZE>;
  using vout_t2 = vec_n_t<OutT, VEC_SIZE>;
  const vin_t2* __restrict__ v_in2  = reinterpret_cast<const vin_t2*>(in);
  vout_t2*       __restrict__ v_out2 = reinterpret_cast<vout_t2*>(out);

  // (Vector main)
  for (int i = tid; i < num_vec; i += stride) {
    vout_t2 tmp;
    vec_op(tmp, v_in2[i]);
    v_out2[i] = tmp;
  }

  // (Scalar tail)
  int tail_start = num_vec * VEC_SIZE;
  for (int i = tid + tail_start; i < len; i += stride) {
    scalar_op(out[i], in[i]);
  }
}

// FIXED version: load the pack once into a local, then call vec_op.
// This typically yields ld.global.v4.f32 / st.global.v4.f32 for float,4.
template <int VEC_SIZE, typename InT, typename OutT, typename VecOp,
          typename ScaOp>
__device__ inline void vectorize_with_alignment_fixed(
    const InT* __restrict__ in, OutT* __restrict__ out,
    int len, int tid, int stride,
    VecOp&& vec_op, ScaOp&& scalar_op) {
  static_assert(VEC_SIZE > 0 && (VEC_SIZE & (VEC_SIZE - 1)) == 0, "");
  constexpr int WIDTH = VEC_SIZE * sizeof(InT);
  uintptr_t addr = reinterpret_cast<uintptr_t>(in);

  bool can_vec = ((addr & (WIDTH - 1)) == 0) && ((len & (VEC_SIZE - 1)) == 0);
  if (can_vec) {
    int num_vec = len / VEC_SIZE;
    using vin_t  = vec_n_t<InT,  VEC_SIZE>;
    using vout_t = vec_n_t<OutT, VEC_SIZE>;
    const vin_t* __restrict__ v_in  = reinterpret_cast<const vin_t*>(in);
    vout_t*       __restrict__ v_out = reinterpret_cast<vout_t*>(out);

    for (int i = tid; i < num_vec; i += stride) {
      vout_t tmp;
      // *** Key change: make a local copy of the entire pack ***
      vin_t src = v_in[i];      // <- encourages a single vector ld
      vec_op(tmp, src);
      v_out[i] = tmp;           // <- encourages a single vector st
    }
    return;
  }

  // Fallback path (kept simple for the demo): do scalar.
  for (int i = tid; i < len; i += stride) {
    scalar_op(out[i], in[i]);
  }
}

} // namespace vllm

// ===== demo.cu =====
using namespace vllm;

// Simple scalar op: y = x + 1
struct Add1 {
  __device__ __forceinline__ void operator()(float& y, float x) const {
    y = x + 1.0f;
  }
};

// Bad path: uses original vectorize_with_alignment (often scalar ld/st)
extern "C" __global__ void bad_kernel(float* __restrict__ out,
                                      const float* __restrict__ in) {
  constexpr int V = 4;  // 4 * 4B = 16B pack
  DefaultVecOp<V, float, float, Add1> vop{Add1{}};

  // Assume a single pack for demo (len == 4), tid==0, stride==1
  vllm::vectorize_with_alignment<V>(in, out, /*len=*/4,
                                    /*tid=*/0, /*stride=*/1,
                                    vop, Add1{});
}

// Good path: uses fixed version that loads/stores whole packs
extern "C" __global__ void good_kernel(float* __restrict__ out,
                                       const float* __restrict__ in) {
  constexpr int V = 4;  // 16B
  DefaultVecOp<V, float, float, Add1> vop{Add1{}};

  vllm::vectorize_with_alignment_fixed<V>(in, out, /*len=*/4,
                                          /*tid=*/0, /*stride=*/1,
                                          vop, Add1{});
}
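
Not part of the PR, but as a quick way to sanity-check the demo, a minimal host-side harness (a sketch; assumes it is appended to demo.cu and the file is built as an executable rather than with -ptx):

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const int n = 4;                       // one 16-byte pack, matching the demo kernels
  float h_in[n] = {1.f, 2.f, 3.f, 4.f};
  float h_out[n] = {};
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

  good_kernel<<<1, 1>>>(d_out, d_in);    // expect out[i] = in[i] + 1
  cudaDeviceSynchronize();

  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%g ", h_out[i]);  // prints: 2 3 4 5
  printf("\n");

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}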

Compiled with -lineinfo -g -O3 -arch=sm_90 -ptx,

we see that the fixed function (vectorize_with_alignment_fixed in the demo) now reads each pack with a single vectorized ld.v4.f32:


.visible .func void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)(
        .param .b64 void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_0,
        .param .b64 void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_1,
        .param .b32 void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_2,
        .param .b32 void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_3,
        .param .b32 void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_4,
        .param .b64 void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_5,
        .param .b64 void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_6
)
{

        ld.param.u64    %rd17, [void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_0];
        ld.param.u64    %rd18, [void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_1];
        ld.param.u32    %r20, [void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_2];
        ld.param.u32    %r45, [void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_3];
        ld.param.u32    %r22, [void vllm::vectorize_with_alignment_fixed<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_4];
        and.b64         %rd19, %rd17, 15;
        setp.eq.s64     %p1, %rd19, 0;
        and.b32         %r23, %r20, 3;
        setp.eq.s32     %p2, %r23, 0;
        and.pred        %p3, %p1, %p2;
        @%p3 bra        $L__BB3_8;
        bra.uni         $L__BB3_1;

$L__BB3_8:
        shr.s32         %r30, %r20, 31;
        shr.u32         %r31, %r30, 30;
        add.s32         %r32, %r20, %r31;
        shr.s32         %r10, %r32, 2;
        setp.le.s32     %p9, %r10, %r45;
        @%p9 bra        $L__BB3_15;

        not.b32         %r33, %r45;
        add.s32         %r34, %r10, %r33;
        div.u32         %r11, %r34, %r22;
        add.s32         %r35, %r11, 1;
        and.b32         %r44, %r35, 3;
        setp.eq.s32     %p10, %r44, 0;
        @%p10 bra       $L__BB3_12;

        mul.wide.s32    %rd30, %r45, 4;
        shl.b64         %rd31, %rd30, 2;
        add.s64         %rd44, %rd17, %rd31;
        mul.wide.s32    %rd10, %r22, 16;
        add.s64         %rd43, %rd18, %rd31;

$L__BB3_11:
        ld.v4.f32       {%f11, %f12, %f13, %f14}, [%rd44];
        add.f32         %f19, %f14, 0f3F800000;
        add.f32         %f20, %f13, 0f3F800000;
        add.f32         %f21, %f12, 0f3F800000;
        add.f32         %f22, %f11, 0f3F800000;
        st.v4.f32       [%rd43], {%f22, %f21, %f20, %f19};
        add.s32         %r45, %r45, %r22;
        add.s64         %rd44, %rd44, %rd10;
        add.s64         %rd43, %rd43, %rd10;
        add.s32         %r44, %r44, -1;
        setp.ne.s32     %p11, %r44, 0;
        @%p11 bra       $L__BB3_11;

$L__BB3_12:
        setp.lt.u32     %p12, %r11, 3;
        @%p12 bra       $L__BB3_15;

        mul.wide.s32    %rd16, %r22, 16;

$L__BB3_14:
        mul.wide.s32    %rd32, %r45, 16;
        add.s64         %rd33, %rd17, %rd32;
        ld.v4.f32       {%f23, %f24, %f25, %f26}, [%rd33];
        add.s64         %rd34, %rd18, %rd32;
        add.f32         %f31, %f26, 0f3F800000;
        add.f32         %f32, %f25, 0f3F800000;
        add.f32         %f33, %f24, 0f3F800000;
        add.f32         %f34, %f23, 0f3F800000;
        st.v4.f32       [%rd34], {%f34, %f33, %f32, %f31};
        add.s64         %rd35, %rd33, %rd16;
        ld.v4.f32       {%f35, %f36, %f37, %f38}, [%rd35];
        add.f32         %f43, %f38, 0f3F800000;
        add.f32         %f44, %f37, 0f3F800000;
        add.f32         %f45, %f36, 0f3F800000;
        add.f32         %f46, %f35, 0f3F800000;
        add.s64         %rd36, %rd34, %rd16;
        st.v4.f32       [%rd36], {%f46, %f45, %f44, %f43};
        add.s32         %r36, %r45, %r22;
        add.s32         %r37, %r36, %r22;
        add.s64         %rd37, %rd35, %rd16;
        ld.v4.f32       {%f47, %f48, %f49, %f50}, [%rd37];
        add.f32         %f55, %f50, 0f3F800000;
        add.f32         %f56, %f49, 0f3F800000;
        add.f32         %f57, %f48, 0f3F800000;
        add.f32         %f58, %f47, 0f3F800000;
        add.s64         %rd38, %rd36, %rd16;
        st.v4.f32       [%rd38], {%f58, %f57, %f56, %f55};
        add.s32         %r38, %r37, %r22;
        add.s64         %rd39, %rd37, %rd16;
        ld.v4.f32       {%f59, %f60, %f61, %f62}, [%rd39];
        add.f32         %f67, %f62, 0f3F800000;
        add.f32         %f68, %f61, 0f3F800000;
        add.f32         %f69, %f60, 0f3F800000;
        add.f32         %f70, %f59, 0f3F800000;
        add.s64         %rd40, %rd38, %rd16;
        st.v4.f32       [%rd40], {%f70, %f69, %f68, %f67};
        add.s32         %r45, %r38, %r22;
        setp.lt.s32     %p13, %r45, %r10;
        @%p13 bra       $L__BB3_14;
        bra.uni         $L__BB3_15;

$L__BB3_1:
        setp.ge.s32     %p4, %r45, %r20;
        @%p4 bra        $L__BB3_15;

        not.b32         %r24, %r45;
        add.s32         %r25, %r24, %r20;
        div.u32         %r1, %r25, %r22;
        add.s32         %r26, %r1, 1;
        and.b32         %r40, %r26, 3;
        setp.eq.s32     %p5, %r40, 0;
        @%p5 bra        $L__BB3_5;

        mul.wide.s32    %rd20, %r45, 4;
        add.s64         %rd42, %rd17, %rd20;
        mul.wide.s32    %rd2, %r22, 4;
        add.s64         %rd41, %rd18, %rd20;

$L__BB3_4:
        ld.f32  %f1, [%rd42];
        add.f32         %f2, %f1, 0f3F800000;
        st.f32  [%rd41], %f2;
        add.s32         %r45, %r45, %r22;
        add.s64         %rd42, %rd42, %rd2;
        add.s64         %rd41, %rd41, %rd2;
        add.s32         %r40, %r40, -1;
        setp.ne.s32     %p6, %r40, 0;
        @%p6 bra        $L__BB3_4;

$L__BB3_5:
        setp.lt.u32     %p7, %r1, 3;
        @%p7 bra        $L__BB3_15;

        mul.wide.s32    %rd8, %r22, 4;

$L__BB3_7:
        mul.wide.s32    %rd21, %r45, 4;
        add.s64         %rd22, %rd18, %rd21;
        add.s64         %rd23, %rd17, %rd21;
        ld.f32  %f3, [%rd23];
        add.f32         %f4, %f3, 0f3F800000;
        st.f32  [%rd22], %f4;
        add.s64         %rd24, %rd23, %rd8;
        ld.f32  %f5, [%rd24];
        add.f32         %f6, %f5, 0f3F800000;
        add.s64         %rd25, %rd22, %rd8;
        st.f32  [%rd25], %f6;
        add.s32         %r27, %r45, %r22;
        add.s32         %r28, %r27, %r22;
        add.s64         %rd26, %rd24, %rd8;
        ld.f32  %f7, [%rd26];
        add.f32         %f8, %f7, 0f3F800000;
        add.s64         %rd27, %rd25, %rd8;
        st.f32  [%rd27], %f8;
        add.s32         %r29, %r28, %r22;
        add.s64         %rd28, %rd26, %rd8;
        ld.f32  %f9, [%rd28];
        add.f32         %f10, %f9, 0f3F800000;
        add.s64         %rd29, %rd27, %rd8;
        st.f32  [%rd29], %f10;
        add.s32         %r45, %r29, %r22;
        setp.lt.s32     %p8, %r45, %r20;
        @%p8 bra        $L__BB3_7;

$L__BB3_15:
        ret;

whereas the original vectorize_with_alignment

.visible .func void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)(
        .param .b64 void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_0,
        .param .b64 void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_1,
        .param .b32 void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_2,
        .param .b32 void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_3,
        .param .b32 void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_4,
        .param .b64 void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_5,
        .param .b64 void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_6
)
{

        ld.param.u64    %rd37, [void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_0];
        ld.param.u64    %rd38, [void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_1];
        ld.param.u32    %r43, [void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_2];
        ld.param.u32    %r96, [void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_3];
        ld.param.u32    %r45, [void vllm::vectorize_with_alignment<4, float, float, vllm::DefaultVecOp<4, float, float, Add1>&, Add1>(float const*, float*, int, int, int, vllm::DefaultVecOp<4, float, float, Add1>&, Add1&&)_param_4];
        and.b64         %rd39, %rd37, 15;
        setp.eq.s64     %p1, %rd39, 0;
        and.b32         %r46, %r43, 3;
        setp.eq.s32     %p2, %r46, 0;
        and.pred        %p3, %p1, %p2;
        @%p3 bra        $L__BB1_22;
        bra.uni         $L__BB1_1;

$L__BB1_22:
        shr.s32         %r73, %r43, 31;
        shr.u32         %r74, %r73, 30;
        add.s32         %r75, %r43, %r74;
        shr.s32         %r33, %r75, 2;
        setp.le.s32     %p19, %r33, %r96;
        @%p19 bra       $L__BB1_29;

        not.b32         %r76, %r96;
        add.s32         %r77, %r33, %r76;
        div.u32         %r34, %r77, %r45;
        add.s32         %r78, %r34, 1;
        and.b32         %r95, %r78, 3;
        setp.eq.s32     %p20, %r95, 0;
        @%p20 bra       $L__BB1_26;

        mul.wide.s32    %rd82, %r96, 4;
        shl.b64         %rd83, %rd82, 2;
        add.s64         %rd101, %rd38, %rd83;
        mul.wide.s32    %rd30, %r45, 16;
        add.s64         %rd84, %rd37, %rd83;
        add.s64         %rd100, %rd84, 8;

$L__BB1_25:
        ld.f32  %f61, [%rd100+-8];
        ld.f32  %f62, [%rd100+-4];
        ld.f32  %f63, [%rd100];
        ld.f32  %f64, [%rd100+4];
        add.f32         %f65, %f64, 0f3F800000;
        add.f32         %f66, %f63, 0f3F800000;
        add.f32         %f67, %f62, 0f3F800000;
        add.f32         %f68, %f61, 0f3F800000;
        st.v4.f32       [%rd101], {%f68, %f67, %f66, %f65};
        add.s32         %r96, %r96, %r45;
        add.s64         %rd101, %rd101, %rd30;
        add.s64         %rd100, %rd100, %rd30;
        add.s32         %r95, %r95, -1;
        setp.ne.s32     %p21, %r95, 0;
        @%p21 bra       $L__BB1_25;

$L__BB1_26:
        setp.lt.u32     %p22, %r34, 3;
        @%p22 bra       $L__BB1_29;

        mul.wide.s32    %rd36, %r45, 16;

$L__BB1_28:
        mul.wide.s32    %rd85, %r96, 16;
        add.s64         %rd86, %rd37, %rd85;
        ld.f32  %f69, [%rd86];
        ld.f32  %f70, [%rd86+4];
        ld.f32  %f71, [%rd86+8];
        ld.f32  %f72, [%rd86+12];
        add.s64         %rd87, %rd38, %rd85;
        add.f32         %f73, %f72, 0f3F800000;
        add.f32         %f74, %f71, 0f3F800000;
        add.f32         %f75, %f70, 0f3F800000;
        add.f32         %f76, %f69, 0f3F800000;
        st.v4.f32       [%rd87], {%f76, %f75, %f74, %f73};
        add.s64         %rd88, %rd86, %rd36;
        ld.f32  %f77, [%rd88];
        ld.f32  %f78, [%rd88+4];
        ld.f32  %f79, [%rd88+8];
        ld.f32  %f80, [%rd88+12];
        add.f32         %f81, %f80, 0f3F800000;
        add.f32         %f82, %f79, 0f3F800000;
        add.f32         %f83, %f78, 0f3F800000;
        add.f32         %f84, %f77, 0f3F800000;
        add.s64         %rd89, %rd87, %rd36;
        st.v4.f32       [%rd89], {%f84, %f83, %f82, %f81};
        add.s32         %r79, %r96, %r45;
        add.s32         %r80, %r79, %r45;
        add.s64         %rd90, %rd88, %rd36;
        ld.f32  %f85, [%rd90];
        ld.f32  %f86, [%rd90+4];
        ld.f32  %f87, [%rd90+8];
        ld.f32  %f88, [%rd90+12];
        add.f32         %f89, %f88, 0f3F800000;
        add.f32         %f90, %f87, 0f3F800000;
        add.f32         %f91, %f86, 0f3F800000;
        add.f32         %f92, %f85, 0f3F800000;
        add.s64         %rd91, %rd89, %rd36;
        st.v4.f32       [%rd91], {%f92, %f91, %f90, %f89};
        add.s32         %r81, %r80, %r45;
        add.s64         %rd92, %rd90, %rd36;
        ld.f32  %f93, [%rd92];
        ld.f32  %f94, [%rd92+4];
        ld.f32  %f95, [%rd92+8];
        ld.f32  %f96, [%rd92+12];
        add.f32         %f97, %f96, 0f3F800000;
        add.f32         %f98, %f95, 0f3F800000;
        add.f32         %f99, %f94, 0f3F800000;
        add.f32         %f100, %f93, 0f3F800000;
        add.s64         %rd93, %rd91, %rd36;
        st.v4.f32       [%rd93], {%f100, %f99, %f98, %f97};
        add.s32         %r96, %r81, %r45;
        setp.lt.s32     %p23, %r96, %r33;
        @%p23 bra       $L__BB1_28;
        bra.uni         $L__BB1_29;

$L__BB1_1:
        neg.s64         %rd40, %rd37;
        and.b64         %rd41, %rd40, 12;
        shr.u64         %rd42, %rd41, 2;
        cvt.u32.u64     %r47, %rd42;
        min.s32         %r1, %r47, %r43;
        setp.le.s32     %p4, %r1, %r96;
        @%p4 bra        $L__BB1_8;

        not.b32         %r48, %r1;
        add.s32         %r49, %r48, %r96;
        mov.u32         %r50, -2;
        sub.s32         %r51, %r50, %r49;
        div.u32         %r2, %r51, %r45;
        add.s32         %r52, %r2, 1;
        and.b32         %r83, %r52, 3;
        setp.eq.s32     %p5, %r83, 0;
        mov.u32         %r84, %r96;
        @%p5 bra        $L__BB1_5;

        mul.wide.s32    %rd43, %r96, 4;
        add.s64         %rd95, %rd37, %rd43;
        mul.wide.s32    %rd2, %r45, 4;
        add.s64         %rd94, %rd38, %rd43;
        mov.u32         %r84, %r96;

$L__BB1_4:
        ld.f32  %f1, [%rd95];
        add.f32         %f2, %f1, 0f3F800000;
        st.f32  [%rd94], %f2;
        add.s32         %r84, %r84, %r45;
        add.s64         %rd95, %rd95, %rd2;
        add.s64         %rd94, %rd94, %rd2;
        add.s32         %r83, %r83, -1;
        setp.ne.s32     %p6, %r83, 0;
        @%p6 bra        $L__BB1_4;

$L__BB1_5:
        setp.lt.u32     %p7, %r2, 3;
        @%p7 bra        $L__BB1_8;

        mul.wide.s32    %rd8, %r45, 4;

$L__BB1_7:
        mul.wide.s32    %rd44, %r84, 4;
        add.s64         %rd45, %rd38, %rd44;
        add.s64         %rd46, %rd37, %rd44;
        ld.f32  %f3, [%rd46];
        add.f32         %f4, %f3, 0f3F800000;
        st.f32  [%rd45], %f4;
        add.s64         %rd47, %rd46, %rd8;
        ld.f32  %f5, [%rd47];
        add.f32         %f6, %f5, 0f3F800000;
        add.s64         %rd48, %rd45, %rd8;
        st.f32  [%rd48], %f6;
        add.s32         %r53, %r84, %r45;
        add.s32         %r54, %r53, %r45;
        add.s64         %rd49, %rd47, %rd8;
        ld.f32  %f7, [%rd49];
        add.f32         %f8, %f7, 0f3F800000;
        add.s64         %rd50, %rd48, %rd8;
        st.f32  [%rd50], %f8;
        add.s32         %r55, %r54, %r45;
        add.s64         %rd51, %rd49, %rd8;
        ld.f32  %f9, [%rd51];
        add.f32         %f10, %f9, 0f3F800000;
        add.s64         %rd52, %rd50, %rd8;
        st.f32  [%rd52], %f10;
        add.s32         %r84, %r55, %r45;
        setp.lt.s32     %p8, %r84, %r1;
        @%p8 bra        $L__BB1_7;

$L__BB1_8:
        cvt.s64.s32     %rd9, %r1;
        mul.wide.s32    %rd53, %r1, 4;
        add.s64         %rd10, %rd37, %rd53;
        add.s64         %rd11, %rd38, %rd53;
        sub.s32         %r11, %r43, %r1;
        shr.s32         %r56, %r11, 31;
        shr.u32         %r57, %r56, 30;
        add.s32         %r58, %r11, %r57;
        shr.s32         %r12, %r58, 2;
        setp.le.s32     %p9, %r12, %r96;
        @%p9 bra        $L__BB1_15;

        not.b32         %r59, %r96;
        add.s32         %r60, %r12, %r59;
        div.u32         %r13, %r60, %r45;
        add.s32         %r61, %r13, 1;
        and.b32         %r87, %r61, 3;
        setp.eq.s32     %p10, %r87, 0;
        mov.u32         %r88, %r96;
        @%p10 bra       $L__BB1_12;

        mul.wide.s32    %rd54, %r96, 4;
        add.s64         %rd55, %rd54, %rd9;
        shl.b64         %rd56, %rd55, 2;
        add.s64         %rd97, %rd38, %rd56;
        mul.wide.s32    %rd13, %r45, 16;
        add.s64         %rd57, %rd37, %rd56;
        add.s64         %rd96, %rd57, 8;
        mov.u32         %r88, %r96;

$L__BB1_11:
        ld.f32  %f11, [%rd96+-8];
        ld.f32  %f12, [%rd96+-4];
        ld.f32  %f13, [%rd96];
        ld.f32  %f14, [%rd96+4];
        add.f32         %f15, %f14, 0f3F800000;
        add.f32         %f16, %f13, 0f3F800000;
        add.f32         %f17, %f12, 0f3F800000;
        add.f32         %f18, %f11, 0f3F800000;
        st.v4.f32       [%rd97], {%f18, %f17, %f16, %f15};
        add.s32         %r88, %r88, %r45;
        add.s64         %rd97, %rd97, %rd13;
        add.s64         %rd96, %rd96, %rd13;
        add.s32         %r87, %r87, -1;
        setp.ne.s32     %p11, %r87, 0;
        @%p11 bra       $L__BB1_11;

$L__BB1_12:
        setp.lt.u32     %p12, %r13, 3;
        @%p12 bra       $L__BB1_15;

        mul.wide.s32    %rd19, %r45, 16;
        add.s64         %rd20, %rd19, -4;

$L__BB1_14:
        mul.wide.s32    %rd58, %r88, 16;
        add.s64         %rd59, %rd10, %rd58;
        ld.f32  %f19, [%rd59];
        add.s64         %rd60, %rd59, 4;
        ld.f32  %f20, [%rd59+4];
        ld.f32  %f21, [%rd59+8];
        ld.f32  %f22, [%rd59+12];
        add.s64         %rd61, %rd11, %rd58;
        add.f32         %f23, %f22, 0f3F800000;
        add.f32         %f24, %f21, 0f3F800000;
        add.f32         %f25, %f20, 0f3F800000;
        add.f32         %f26, %f19, 0f3F800000;
        st.v4.f32       [%rd61], {%f26, %f25, %f24, %f23};
        add.s64         %rd62, %rd60, %rd19;
        add.s64         %rd63, %rd59, %rd19;
        ld.f32  %f27, [%rd63];
        ld.f32  %f28, [%rd63+4];
        ld.f32  %f29, [%rd63+8];
        ld.f32  %f30, [%rd63+12];
        add.f32         %f31, %f30, 0f3F800000;
        add.f32         %f32, %f29, 0f3F800000;
        add.f32         %f33, %f28, 0f3F800000;
        add.f32         %f34, %f27, 0f3F800000;
        add.s64         %rd64, %rd61, %rd19;
        st.v4.f32       [%rd64], {%f34, %f33, %f32, %f31};
        add.s32         %r62, %r88, %r45;
        add.s32         %r63, %r62, %r45;
        add.s64         %rd65, %rd62, %rd19;
        add.s64         %rd66, %rd62, %rd20;
        ld.f32  %f35, [%rd66];
        ld.f32  %f36, [%rd66+4];
        ld.f32  %f37, [%rd66+8];
        ld.f32  %f38, [%rd66+12];
        add.f32         %f39, %f38, 0f3F800000;
        add.f32         %f40, %f37, 0f3F800000;
        add.f32         %f41, %f36, 0f3F800000;
        add.f32         %f42, %f35, 0f3F800000;
        add.s64         %rd67, %rd64, %rd19;
        st.v4.f32       [%rd67], {%f42, %f41, %f40, %f39};
        add.s32         %r64, %r63, %r45;
        add.s64         %rd68, %rd65, %rd20;
        ld.f32  %f43, [%rd68];
        ld.f32  %f44, [%rd68+4];
        ld.f32  %f45, [%rd68+8];
        ld.f32  %f46, [%rd68+12];
        add.f32         %f47, %f46, 0f3F800000;
        add.f32         %f48, %f45, 0f3F800000;
        add.f32         %f49, %f44, 0f3F800000;
        add.f32         %f50, %f43, 0f3F800000;
        add.s64         %rd69, %rd67, %rd19;
        st.v4.f32       [%rd69], {%f50, %f49, %f48, %f47};
        add.s32         %r88, %r64, %r45;
        setp.lt.s32     %p13, %r88, %r12;
        @%p13 bra       $L__BB1_14;

$L__BB1_15:
        shl.b32         %r22, %r12, 2;
        add.s32         %r92, %r22, %r96;
        setp.ge.s32     %p14, %r92, %r11;
        @%p14 bra       $L__BB1_29;

        not.b32         %r65, %r1;
        add.s32         %r66, %r65, %r43;
        sub.s32         %r67, %r66, %r96;
        sub.s32         %r68, %r67, %r22;
        div.u32         %r24, %r68, %r45;
        add.s32         %r69, %r24, 1;
        and.b32         %r91, %r69, 3;
        setp.eq.s32     %p15, %r91, 0;
        @%p15 bra       $L__BB1_19;

        cvt.s64.s32     %rd70, %r92;
        add.s64         %rd71, %rd9, %rd70;
        shl.b64         %rd72, %rd71, 2;
        add.s64         %rd99, %rd37, %rd72;
        mul.wide.s32    %rd22, %r45, 4;
        add.s64         %rd98, %rd38, %rd72;

$L__BB1_18:
        ld.f32  %f51, [%rd99];
        add.f32         %f52, %f51, 0f3F800000;
        st.f32  [%rd98], %f52;
        add.s32         %r92, %r92, %r45;
        add.s64         %rd99, %rd99, %rd22;
        add.s64         %rd98, %rd98, %rd22;
        add.s32         %r91, %r91, -1;
        setp.ne.s32     %p16, %r91, 0;
        @%p16 bra       $L__BB1_18;

$L__BB1_19:
        setp.lt.u32     %p17, %r24, 3;
        @%p17 bra       $L__BB1_29;

        mul.wide.s32    %rd28, %r45, 4;

$L__BB1_21:
        mul.wide.s32    %rd73, %r92, 4;
        add.s64         %rd74, %rd11, %rd73;
        add.s64         %rd75, %rd10, %rd73;
        ld.f32  %f53, [%rd75];
        add.f32         %f54, %f53, 0f3F800000;
        st.f32  [%rd74], %f54;
        add.s64         %rd76, %rd75, %rd28;
        ld.f32  %f55, [%rd76];
        add.f32         %f56, %f55, 0f3F800000;
        add.s64         %rd77, %rd74, %rd28;
        st.f32  [%rd77], %f56;
        add.s32         %r70, %r92, %r45;
        add.s32         %r71, %r70, %r45;
        add.s64         %rd78, %rd76, %rd28;
        ld.f32  %f57, [%rd78];
        add.f32         %f58, %f57, 0f3F800000;
        add.s64         %rd79, %rd77, %rd28;
        st.f32  [%rd79], %f58;
        add.s32         %r72, %r71, %r45;
        add.s64         %rd80, %rd78, %rd28;
        ld.f32  %f59, [%rd80];
        add.f32         %f60, %f59, 0f3F800000;
        add.s64         %rd81, %rd79, %rd28;
        st.f32  [%rd81], %f60;
        add.s32         %r92, %r72, %r45;
        setp.lt.s32     %p18, %r92, %r11;
        @%p18 bra       $L__BB1_21;

$L__BB1_29:
        ret;

}
.visible .entry good_kernel(
        .param .u64 good_kernel_param_0,
        .param .u64 good_kernel_param_1
)
{

        ld.param.u64    %rd3, [good_kernel_param_0];
        ld.param.u64    %rd4, [good_kernel_param_1];
        cvta.to.global.u64      %rd2, %rd3;
        and.b64         %rd5, %rd4, 15;
        setp.eq.s64     %p1, %rd5, 0;
        cvta.to.global.u64      %rd1, %rd4;
        @%p1 bra        $L__BB2_2;

        ld.global.nc.f32        %f1, [%rd1];
        add.f32         %f2, %f1, 0f3F800000;
        st.global.f32   [%rd2], %f2;
        ld.global.nc.f32        %f3, [%rd1+4];
        add.f32         %f4, %f3, 0f3F800000;
        st.global.f32   [%rd2+4], %f4;
        ld.global.nc.f32        %f5, [%rd1+8];
        add.f32         %f6, %f5, 0f3F800000;
        st.global.f32   [%rd2+8], %f6;
        ld.global.nc.f32        %f7, [%rd1+12];
        add.f32         %f8, %f7, 0f3F800000;
        st.global.f32   [%rd2+12], %f8;
        bra.uni         $L__BB2_3;

$L__BB2_2:
        ld.global.nc.v4.f32     {%f9, %f10, %f11, %f12}, [%rd1];
        add.f32         %f17, %f12, 0f3F800000;
        add.f32         %f18, %f11, 0f3F800000;
        add.f32         %f19, %f10, 0f3F800000;
        add.f32         %f20, %f9, 0f3F800000;
        st.global.v4.f32        [%rd2], {%f20, %f19, %f18, %f17};

$L__BB2_3:
        ret;


does not emit vectorized load instructions: each pack is still read with four scalar ld.f32, even though the stores are emitted as st.v4.f32. (The good_kernel entry at the end of the listing, which inlines the fixed path, does show the fully vectorized ld.global.nc.v4.f32 / st.global.v4.f32 pair in its aligned branch.)

Furthermore, tests were done in a broader context, and ncu now reports significantly fewer uncoalesced global loads for the updated code:

[ncu screenshot: uncoalesced global load comparison]

Thank you @LucasWilkinson for the find.


👋 Hi! Thank you for contributing to the vLLM project.

💬 Join our developer Slack at https://slack.vllm.ai to discuss your PR in #pr-reviews, coordinate on features in #feat- channels, or join special interest groups in #sig- channels.

Just a reminder: PRs do not trigger a full CI run by default. Instead, only the fastcheck CI runs, covering a small, essential subset of tests to quickly catch errors. You can run additional CI tests on top of those by going to your fastcheck build on the Buildkite UI (linked in the PR checks section) and unblocking them. If you do not have permission to unblock, ping simon-mo or khluu to add you to our Buildkite org.

Once the PR is approved and ready to go, your PR reviewer(s) can run CI to test the changes comprehensively before merging.

To run full CI, PR reviewers can either add the ready label to the PR or enable auto-merge.

🚀

@gemini-code-assist bot (Contributor) left a comment

Code Review

This pull request effectively addresses a performance issue where vectorize_with_alignment failed to produce vectorized global loads. By explicitly creating a local copy of the data, you've successfully guided the compiler to generate the desired vectorized instructions. The provided PTX output and ncu metrics clearly demonstrate the improvement. My review includes one suggestion to apply this same optimization to the vectorize_read_with_alignment function to ensure consistent performance improvements.

Comment on lines 44 to 48

high

This is an excellent fix that correctly encourages the compiler to generate vectorized load instructions. A similar performance issue exists in the vectorize_read_with_alignment function within this file (on lines 132 and 157), where data is also passed directly from a global pointer. To ensure consistent vectorization, you should apply the same pattern there.

For example:

// In vectorize_read_with_alignment's fast path (line 132)
for (int i = tid; i < num_vec; i += stride) {
  vin_t src = v_in[i];
  vec_op(src);
}

This change should be applied to the vectorized loop in the slow path as well (line 157).
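
Concretely, a hedged sketch of what both loops could look like with the local copy applied (the body of vectorize_read_with_alignment is not quoted in this thread, so the surrounding names are assumed to mirror vectorize_with_alignment above):

// Assumed shape of the read-only fast path (and, analogously, the vector loop
// in the misaligned slow path). v_in, num_vec, tid and stride mirror the names
// used in vectorize_with_alignment; vec_op here takes only the loaded pack.
using vin_t = vec_n_t<InT, VEC_SIZE>;
const vin_t* v_in = reinterpret_cast<const vin_t*>(in);
for (int i = tid; i < num_vec; i += stride) {
  vin_t src = v_in[i];  // copy the pack into registers first -> single vector load
  vec_op(src);          // operate on the local copy
}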

@yewentao256 (Member) left a comment

Thanks for the optimization!
Could you also test E2E model performance and accuracy?
Example commands can be seen in #21961.

@elvircrn force-pushed the vectorize_with_alignment_fix branch from 868649b to 1dc7136 on August 19, 2025 17:19
@elvircrn marked this pull request as draft on August 19, 2025 17:38
@elvircrn (Contributor, Author) commented:

Converted to draft until I test out lm_eval from #21961.

@mgoin self-assigned this Aug 20, 2025
@mgoin (Member) commented Aug 20, 2025

Nice find, looking forward to the e2e results to validate.

@elvircrn marked this pull request as ready for review August 20, 2025 07:29

dosubot bot commented Aug 20, 2025

Related Documentation

No published documentation to review for changes on this repository.

@elvircrn (Contributor, Author) commented:

@mgoin @yewentao256 @ProExpertProg I removed the draft tag.

@elvircrn (Contributor, Author) commented Aug 20, 2025

I also ran vllm bench throughput --model nm-testing/DeepSeek-Coder-V2-Lite-Instruct-FP8 --input-len 1000 --output-len 100 --trust_remote_code --enforce_eager

Proposed changes:

Throughput: 48.24 requests/s, 52982.55 total tokens/s, 4824.34 output tokens/s
Total num prompt tokens:  998235
Total num output tokens:  100000

Base branch:

Throughput: 47.74 requests/s, 52425.60 total tokens/s, 4773.62 output tokens/s
Total num prompt tokens:  998235
Total num output tokens:  100000

@elvircrn (Contributor, Author) commented Aug 20, 2025

Re-ran accuracy tests via lm_eval --model vllm --model_args "pretrained=nm-testing/DeepSeek-Coder-V2-Lite-Instruct-FP8,max_model_len=32768,enable_expert_parallel=True,enforce_eager=True" --trust_remote_code --tasks gsm8k --num_fewshot 5 --batch_size auto

This branch:

|Tasks|Version|     Filter     |n-shot|  Metric   |   |Value |   |Stderr|
|-----|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|gsm8k|      3|flexible-extract|     5|exact_match|↑  |0.7566|±  |0.0118|
|     |       |strict-match    |     5|exact_match|↑  |0.7384|±  |0.0121|

Base branch:

|Tasks|Version|     Filter     |n-shot|  Metric   |   |Value |   |Stderr|
|-----|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|gsm8k|      3|flexible-extract|     5|exact_match|↑  |0.7566|±  |0.0118|
|     |       |strict-match    |     5|exact_match|↑  |0.7384|±  |0.0121|

@yewentao256 (Member) left a comment

Looks good to me, thanks for the work!

@yewentao256 added the ready label (ONLY add when PR is ready to merge/full CI is needed) Aug 20, 2025
@mgoin (Member) left a comment

Great find! Please put the perf and eval in the PR description

@mgoin enabled auto-merge (squash) August 21, 2025 13:58
@mgoin merged commit 044931f into vllm-project:main Aug 21, 2025
72 checks passed
Xu-Wenqing pushed a commit to Xu-Wenqing/vllm that referenced this pull request Aug 23, 2025
epwalsh pushed a commit to epwalsh/vllm that referenced this pull request Aug 28, 2025
xiao-llm pushed a commit to xiao-llm/vllm that referenced this pull request Aug 28, 2025
zhewenl pushed a commit to zhewenl/vllm that referenced this pull request Aug 28, 2025
mengxingkongzhouhan pushed a commit to mengxingkongzhouhan/vllm that referenced this pull request Aug 30, 2025
zhewenl pushed a commit to zhewenl/vllm that referenced this pull request Sep 3, 2025
FeiDaLI pushed a commit to FeiDaLI/vllm that referenced this pull request Sep 25, 2025