ggml : add and use ggml_cpu_has_llamafile() (llama/8664)

This commit is contained in:
Georgi Gerganov 2024-07-25 12:37:42 +03:00
parent 7598acf525
commit c06970dd72
2 changed files with 9 additions and 0 deletions

View File

@@ -2400,6 +2400,7 @@ extern "C" {
GGML_API int ggml_cpu_has_vsx (void);
GGML_API int ggml_cpu_has_matmul_int8(void);
GGML_API int ggml_cpu_has_cann (void);
GGML_API int ggml_cpu_has_llamafile (void);
//
// Internal types and functions exposed for tests and benchmarks

View File

@@ -22004,6 +22004,14 @@ int ggml_cpu_has_cann(void) {
#endif
}
// Reports whether this build of ggml was compiled with llamafile
// (sgemm) support: 1 if GGML_USE_LLAMAFILE was defined at build time,
// 0 otherwise. Purely a compile-time capability probe; no runtime checks.
int ggml_cpu_has_llamafile(void) {
#ifdef GGML_USE_LLAMAFILE
    return 1;
#else
    return 0;
#endif
}
// Reports whether any GPU BLAS backend was compiled in.
// True if at least one of CUDA, Vulkan, Kompute or SYCL is available;
// each probe is itself a compile-time capability check.
int ggml_cpu_has_gpublas(void) {
    if (ggml_cpu_has_cuda()) {
        return 1;
    }
    if (ggml_cpu_has_vulkan()) {
        return 1;
    }
    if (ggml_cpu_has_kompute()) {
        return 1;
    }
    return ggml_cpu_has_sycl();
}