CUDA: fix typo in FlashAttention code (llama/13926)

Johannes Gäßler 2025-05-30 21:22:03 +02:00 committed by Georgi Gerganov
parent 6c0472ab8f
commit a5aff28198


@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING
     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");
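
For context on why this counts as a typo: in C, C++, and CUDA, any non-comment tokens after #endif are invalid, and compilers typically emit "extra tokens at end of #endif directive", so the trailing condition label needs the // marker added by this commit. Below is a minimal, self-contained sketch (not the actual llama.cpp source; the value assigned to GGML_CUDA_CC_TURING here is a placeholder for illustration) showing the corrected pattern; it compiles as plain C++ on the host, where __CUDA_ARCH__ is not defined.

// Minimal sketch of the corrected #endif style (placeholder values, not llama.cpp code).
#include <cstdio>

#define GGML_CUDA_CC_TURING 750  // placeholder value for this sketch

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == GGML_CUDA_CC_TURING
// Turing-only device code would go here.
#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING

int main() {
    // The trailer after #endif is inside a // comment, so no "extra tokens" warning is emitted.
    std::printf("ok\n");
    return 0;
}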