kompute : llama-bench support and ggml_cpu_has_kompute() (llama/5226)

Author: Jared Van Bortel
Date: 2024-01-30 19:04:37 -05:00
Committed by: Georgi Gerganov
Parent: f75e1197f1
Commit: f850a067ed
2 changed files with 11 additions and 1 deletion

ggml.c

@@ -20473,6 +20473,14 @@ int ggml_cpu_has_vulkan(void) {
 #endif
 }
 
+int ggml_cpu_has_kompute(void) {
+#if defined(GGML_USE_KOMPUTE)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_sycl(void) {
 #if defined(GGML_USE_SYCL)
     return 1;
@@ -20482,7 +20490,8 @@ int ggml_cpu_has_sycl(void) {
 }
 
 int ggml_cpu_has_gpublas(void) {
-    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_sycl();
+    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
+           ggml_cpu_has_sycl();
 }
 
 int ggml_cpu_has_sse3(void) {
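
For context, a minimal usage sketch (not part of this commit) of how a caller might report the new capability. It assumes ggml.h declares ggml_cpu_has_kompute(); the second changed file in this commit, which is not shown above, presumably adds that declaration.

// Hypothetical capability report; ggml_cpu_has_kompute() and
// ggml_cpu_has_gpublas() are the functions touched by this commit,
// the surrounding program is illustrative only.
#include <stdio.h>
#include "ggml.h"

int main(void) {
    // Each query returns 1 if ggml was built with the backend, 0 otherwise.
    printf("kompute: %d\n", ggml_cpu_has_kompute());
    printf("gpublas: %d\n", ggml_cpu_has_gpublas());
    return 0;
}

This mirrors how tools such as llama-bench print the build's backend flags, which is why ggml_cpu_has_kompute() is added alongside the existing ggml_cpu_has_* helpers.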