From a5aff2819890eb270c0c325e8d440de4232e1243 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?=
Date: Fri, 30 May 2025 21:22:03 +0200
Subject: [PATCH] CUDA: fix typo in FlashAttention code (llama/13926)

---
 ggml/src/ggml-cuda/fattn-mma-f16.cuh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ggml/src/ggml-cuda/fattn-mma-f16.cuh
index 7120053b..925f39e8 100644
--- a/ggml/src/ggml-cuda/fattn-mma-f16.cuh
+++ b/ggml/src/ggml-cuda/fattn-mma-f16.cuh
@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING
 
     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");
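
Editor's note, not part of the upstream patch: a minimal sketch of why the one-character-class change matters. The C/C++ preprocessor (which NVCC shares) only permits a comment after #endif; bare tokens there are ill-formed and typically draw a compiler warning (e.g. GCC's -Wendif-labels, or an error under pedantic settings). The identifiers below are taken from the patched file; the elided body is hypothetical.

    // Before the patch: the condition text trails #endif as bare tokens,
    // so compilers may warn "extra tokens at end of #endif directive".
    #if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
        // ... Turing-specific early exit in the real kernel ...
    #endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING

    // After the patch: the same text is kept purely as a comment.
    #if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
        // ... Turing-specific early exit in the real kernel ...
    #endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING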