llama-bench : add support for the RPC backend (llama/7435)

This commit is contained in:
Radoslav Gerganov 2024-05-29 14:45:44 +03:00 committed by Georgi Gerganov
parent 8f5dc729d9
commit a535d348dd
2 changed files with 9 additions and 0 deletions

8
ggml.c
View File

@@ -22872,6 +22872,14 @@ int ggml_cpu_has_sycl(void) {
#endif
}
// Returns 1 when ggml was compiled with the RPC backend enabled
// (GGML_USE_RPC defined at build time), 0 otherwise.
int ggml_cpu_has_rpc(void) {
#ifdef GGML_USE_RPC
    return 1;
#else
    return 0;
#endif
}
int ggml_cpu_has_gpublas(void) {
return ggml_cpu_has_cuda() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
ggml_cpu_has_sycl();

1
ggml.h
View File

@@ -2428,6 +2428,7 @@ extern "C" {
GGML_API int ggml_cpu_has_sse3 (void);
GGML_API int ggml_cpu_has_ssse3 (void);
GGML_API int ggml_cpu_has_sycl (void);
GGML_API int ggml_cpu_has_rpc (void);
GGML_API int ggml_cpu_has_vsx (void);
GGML_API int ggml_cpu_has_matmul_int8(void);