Merged

22 commits:
ae61ab4  Reduce allocation overhead in quantized sdpa (kimishpatel, Nov 5, 2025)
99902b8  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 6, 2025)
72292a7  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 6, 2025)
93e17be  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 10, 2025)
602a3a7  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 14, 2025)
9a72754  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 20, 2025)
d389555  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 21, 2025)
223495c  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 22, 2025)
4c1faee  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 23, 2025)
4464d3e  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 23, 2025)
e571e70  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 24, 2025)
4890054  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 24, 2025)
0496db7  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 24, 2025)
ec33b54  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 25, 2025)
d2ce926  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Nov 25, 2025)
dacb047  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Dec 4, 2025)
fd8e7e8  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Dec 4, 2025)
8b575ab  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Dec 4, 2025)
713df2d  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Dec 4, 2025)
1d03b6c  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Dec 4, 2025)
05f7021  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Dec 4, 2025)
4e529d1  Update on "Reduce allocation overhead in quantized sdpa" (kimishpatel, Dec 4, 2025)
30 changes: 23 additions & 7 deletions extension/llm/custom_ops/op_sdpa_impl.h
@@ -213,13 +213,13 @@ void dequant_and_gemm(
const int64_t v_stride_n,
float* o_data,
const int64_t o_stride_m,
-      const float beta) {
-  std::vector<float> dequantized_v_data(v_data.m * v_data.n);
+      const float beta,
+      float* buf_qdq_ptr) {
dequantize_per_channel_optimized(
static_cast<const int8_t*>(v_data.data),
static_cast<const float*>(v_data.scales),
static_cast<const int8_t*>(v_data.zero_points),
-      dequantized_v_data.data(),
+      buf_qdq_ptr,
-128,
127,
1,
@@ -237,7 +237,7 @@
m,
k,
static_cast<float>(1),
-      dequantized_v_data.data(),
+      buf_qdq_ptr,
v_data.n,
qk_data,
qk_stride_m,
@@ -257,7 +257,8 @@ void _qk_at_v_gemm(
const int64_t v_stride_n,
accum_t* o_data,
const int64_t o_stride_m,
-      const accum_t beta) {
+      const accum_t beta,
+      accum_t* buf_qdq_ptr) {
if (v_data.dtype == ScalarType::Char) {
if constexpr (std::is_same<accum_t, float>::value) {
if (m > 4) {
@@ -273,7 +274,8 @@
v_stride_n,
o_data,
o_stride_m,
-            beta);
+            beta,
+            buf_qdq_ptr);
} else {
// For smaller batch sizes, use quantized gemm
int a_stride_m_tmp, b_stride_n_tmp;
@@ -773,6 +775,17 @@ void cpu_flash_attention(
// at::Tensor buf_reduced = at::empty(
// {num_thread, qSplitSize, is_reduced_type ? kvSplitSize : 0},
// query.options());
+  int64_t size_per_thread_qdq_vec = kvSplitSize * headSize;
+  // Lets align size_per_thread_qdq_vec to 64 bytes, for coalesced cache reads,
Copilot AI, Nov 17, 2025:
The comment says "align to 64 bytes" but kAlignment = 32 aligns to 32 elements. Since size_per_thread_qdq_vec is an element count (not byte count), and assuming accum_t is float (4 bytes), this aligns to 128 bytes (32 * 4), not 64 bytes.

Either:

  1. Change kAlignment to 16 if 64-byte alignment is desired, or
  2. Update the comment to say "align to 32 elements" or "align to 128 bytes (for float)"
Suggested change:
-  // Lets align size_per_thread_qdq_vec to 64 bytes, for coalesced cache reads,
+  // Lets align size_per_thread_qdq_vec to 32 elements (128 bytes for float), for coalesced cache reads,
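The units point is easy to check concretely: the padding is computed in elements of accum_t, so the byte alignment it buys scales with sizeof(accum_t). A minimal standalone sketch (names are local to this example, not taken from the PR):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Element-count alignment as discussed above: pad to a multiple of 32 elements.
  constexpr int64_t kAlignmentElems = 32;
  const int64_t elems = 1000;  // stand-in for kvSplitSize * headSize
  // Round up with the conventional power-of-two mask.
  const int64_t padded = (elems + kAlignmentElems - 1) & -kAlignmentElems;

  // The byte alignment this buys depends on the element type.
  printf("padded elems:    %lld\n", (long long)padded);        // 1024
  printf("bytes if float:  %lld\n", (long long)(padded * 4));  // multiple of 128
  printf("bytes if double: %lld\n", (long long)(padded * 8));  // multiple of 256
  return 0;
}
```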
+  // by padding with right number of per thread elements
+  constexpr int64_t kAlignment = 64;
+  size_per_thread_qdq_vec =
+      (size_per_thread_qdq_vec + kAlignment - 1) & (-(kAlignment - 1));
Copilot AI, Nov 17, 2025:
The alignment calculation is incorrect. The formula (x + kAlignment - 1) & (-(kAlignment - 1)) uses the wrong mask.

For aligning to a power-of-2 boundary, the correct formula is:

(size_per_thread_qdq_vec + kAlignment - 1) & (-kAlignment)

or equivalently:

(size_per_thread_qdq_vec + kAlignment - 1) & ~(kAlignment - 1)

The current code uses -(kAlignment - 1) which equals -31 = 0xFFFFFFE1, but the correct mask should be -32 = 0xFFFFFFE0 to properly zero out the bottom 5 bits.

Suggested change:
-      (size_per_thread_qdq_vec + kAlignment - 1) & (-(kAlignment - 1));
+      (size_per_thread_qdq_vec + kAlignment - 1) & -kAlignment;
Contributor:
@copilot are you sure? Please double check again
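The disagreement is easy to settle by brute force. A small self-contained check of both masks (assumes kAlignment = 32 as in the comment above; not part of the PR):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  constexpr int64_t kAlignment = 32;
  for (int64_t x = 1; x <= 256; ++x) {
    const int64_t wrong = (x + kAlignment - 1) & (-(kAlignment - 1));  // mask -31
    const int64_t right = (x + kAlignment - 1) & -kAlignment;          // mask -32
    if (wrong % kAlignment != 0) {
      printf("x=%lld: -(kAlignment - 1) gives %lld, not 32-aligned (correct: %lld)\n",
             (long long)x, (long long)wrong, (long long)right);
    }
  }
  return 0;
}
```

The first failure is already x = 2: (2 + 31) & -31 = 33, which is not a multiple of 32, so Copilot's correction stands.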

+  int64_t size_per_thread_qdq_bytes = size_per_thread_qdq_vec * sizeof(accum_t);
+  int64_t size_qdq_bytes = size_per_thread_qdq_bytes * num_thread;
+  std::vector<char> scratch_for_quant_dequant_vec(size_qdq_bytes);
+  accum_t* scratch_for_quant_dequant =
+      reinterpret_cast<accum_t*>(scratch_for_quant_dequant_vec.data());

// Data ptrs
const scalar_t* q_data = query.const_data_ptr<scalar_t>();
@@ -797,6 +810,8 @@
scalar_t* qk_reduced_data = is_reduced_type
? buf_reduced_data + ompIdx * qSplitSize * kvSplitSize
: nullptr;
+      accum_t* buf_qdq_ptr =
+          scratch_for_quant_dequant + ompIdx * size_per_thread_qdq_vec;

for (int64_t z = begin; z < end; z++) {
int64_t m = k * qSplitSize;
@@ -1053,7 +1068,8 @@
vStrideN,
dst_data,
headSize,
-              n == 0 ? static_cast<accum_t>(0) : static_cast<accum_t>(1));
+              n == 0 ? static_cast<accum_t>(0) : static_cast<accum_t>(1),
+              buf_qdq_ptr);
}
// dst <- dst / sum[row]
// reorder MHA output with strides
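Stripped of the SDPA specifics, the pattern the diff lands on is: compute a padded per-thread element count once, make a single allocation outside the parallel region, and hand each OpenMP thread its own slice. A rough standalone sketch under those assumptions (hypothetical names, and the corrected -kAlignment mask from the review thread):

```cpp
#include <cstdint>
#include <omp.h>
#include <vector>

// Hypothetical worker that needs `elems` floats of scratch per call.
void process_chunk(float* scratch, int64_t elems, int64_t chunk) {
  for (int64_t i = 0; i < elems; ++i) {
    scratch[i] = 0.0f;  // e.g. dequantize V into the scratch slice
  }
  (void)chunk;
}

int main() {
  const int num_threads = omp_get_max_threads();
  int64_t per_thread_elems = 1000;  // stand-in for kvSplitSize * headSize
  constexpr int64_t kAlignment = 64;
  // Pad each slice to a multiple of kAlignment elements so slices do not
  // straddle cache lines across threads.
  per_thread_elems = (per_thread_elems + kAlignment - 1) & -kAlignment;

  // One allocation up front, instead of a std::vector inside every GEMM call.
  std::vector<float> scratch(per_thread_elems * num_threads);

#pragma omp parallel for
  for (int64_t chunk = 0; chunk < 64; ++chunk) {
    const int tid = omp_get_thread_num();
    float* buf = scratch.data() + tid * per_thread_elems;  // this thread's slice
    process_chunk(buf, per_thread_elems, chunk);
  }
  return 0;
}
```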