mirror of
https://github.com/ggerganov/whisper.cpp.git
synced 2025-08-10 14:48:47 +02:00
CUDA: fix typo in FlashAttention code (llama/13926)
This commit is contained in:
committed by
Georgi Gerganov
parent
6c0472ab8f
commit
a5aff28198
@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING

    static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");

Reference in New Issue
Block a user