#pragma once

// ggml-backend internal header

#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif
//
// Backend buffer
//
// buffer type
typedef void * ggml_backend_buffer_type_context_t;

struct ggml_backend_buffer_type_i {
    const char *          (*GGML_CALL get_name)        (ggml_backend_buffer_type_t buft);
    ggml_backend_buffer_t (*GGML_CALL alloc_buffer)    (ggml_backend_buffer_type_t buft, size_t size);
    size_t                (*GGML_CALL get_alignment)   (ggml_backend_buffer_type_t buft); // tensor alignment
    size_t                (*GGML_CALL get_alloc_size)  (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
    bool                  (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
    // check if tensor data is in host memory
    // should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
    bool                  (*GGML_CALL is_host)         (ggml_backend_buffer_type_t buft);
};

struct ggml_backend_buffer_type {
    struct ggml_backend_buffer_type_i  iface;
    ggml_backend_buffer_type_context_t context;
};
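
// Illustrative sketch (not part of this header): how a hypothetical host-memory
// backend might fill in the simpler ggml_backend_buffer_type_i callbacks.
// All "example_" names are assumptions; alloc_buffer is sketched further below.

static const char * GGML_CALL example_buft_get_name(ggml_backend_buffer_type_t buft) {
    (void) buft; // single buffer type, no per-type context needed
    return "EXAMPLE";
}

static size_t GGML_CALL example_buft_get_alignment(ggml_backend_buffer_type_t buft) {
    (void) buft;
    return 32; // e.g. the SIMD alignment the backend's kernels require
}

static size_t GGML_CALL example_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
    (void) buft;
    return ggml_nbytes(tensor); // no extra padding in this sketch
}

static bool GGML_CALL example_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
    (void) buft;
    return ggml_backend_is_cpu(backend); // plain host memory is usable by the CPU backend
}

static bool GGML_CALL example_buft_is_host(ggml_backend_buffer_type_t buft) {
    (void) buft;
    return true; // consistent with supports_backend above
}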
// buffer
typedef void * ggml_backend_buffer_context_t;

struct ggml_backend_buffer_i {
    const char * (*GGML_CALL get_name)   (ggml_backend_buffer_t buffer);
    void         (*GGML_CALL free_buffer)(ggml_backend_buffer_t buffer);
    void *       (*GGML_CALL get_base)   (ggml_backend_buffer_t buffer);
    void         (*GGML_CALL init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
    void         (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer,       struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    void         (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor,       void * data, size_t offset, size_t size);
    bool         (*GGML_CALL cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
    void         (*GGML_CALL clear)      (ggml_backend_buffer_t buffer, uint8_t value);
    void         (*GGML_CALL reset)      (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
};
struct ggml_backend_buffer {
    struct ggml_backend_buffer_i  iface;
    ggml_backend_buffer_type_t    buft;
    ggml_backend_buffer_context_t context;
    size_t size;
    enum ggml_backend_buffer_usage usage;
};
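
// Illustrative sketch (hypothetical): ggml_backend_buffer_i callbacks for a
// buffer whose context is simply the malloc'd base pointer. Assumes <stdlib.h>
// and <string.h> are available; all "example_" names are assumptions.

static const char * GGML_CALL example_buffer_get_name(ggml_backend_buffer_t buffer) {
    (void) buffer;
    return "EXAMPLE";
}

static void GGML_CALL example_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    free(buffer->context); // context holds the malloc'd base pointer
}

static void * GGML_CALL example_buffer_get_base(ggml_backend_buffer_t buffer) {
    return buffer->context;
}

static void GGML_CALL example_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    (void) buffer;
    memcpy((char *) tensor->data + offset, data, size); // host memory: a plain copy suffices
}

static void GGML_CALL example_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    (void) buffer;
    memcpy(data, (const char *) tensor->data + offset, size);
}

static void GGML_CALL example_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    memset(buffer->context, value, buffer->size);
}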
GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init(
               ggml_backend_buffer_type_t      buft,
        struct ggml_backend_buffer_i           iface,
               ggml_backend_buffer_context_t   context,
               size_t                          size);
// do not use directly, use ggml_backend_tensor_copy instead
bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst);
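
// Illustrative sketch (hypothetical): tying the pieces together, an
// alloc_buffer callback for the example buffer type that wires malloc'd memory
// into a ggml_backend_buffer via ggml_backend_buffer_init, using the
// example_buffer_* callbacks sketched above. Optional callbacks are left NULL.

static ggml_backend_buffer_t GGML_CALL example_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    void * data = malloc(size);
    if (data == NULL) {
        return NULL;
    }

    static struct ggml_backend_buffer_i example_buffer_i = {
        /* .get_name    = */ example_buffer_get_name,
        /* .free_buffer = */ example_buffer_free_buffer,
        /* .get_base    = */ example_buffer_get_base,
        /* .init_tensor = */ NULL, // no per-tensor initialization needed
        /* .set_tensor  = */ example_buffer_set_tensor,
        /* .get_tensor  = */ example_buffer_get_tensor,
        /* .cpy_tensor  = */ NULL, // fall back to ggml_backend_buffer_copy_tensor
        /* .clear       = */ example_buffer_clear,
        /* .reset       = */ NULL, // no internal state to reset
    };

    return ggml_backend_buffer_init(buft, example_buffer_i, data, size);
}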
//
// Backend
//
typedef void * ggml_backend_context_t;

struct ggml_backend_i {
    const char * (*GGML_CALL get_name)(ggml_backend_t backend);

    void (*GGML_CALL free)(ggml_backend_t backend);

    // buffer allocation
    ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend);

    // (optional) asynchronous tensor data access
    void (*GGML_CALL set_tensor_async)(ggml_backend_t backend,       struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor,       void * data, size_t offset, size_t size);
    bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst);

    // (optional) complete all pending operations
    void (*GGML_CALL synchronize)(ggml_backend_t backend);

    // compute graph with a plan
    ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
    void                      (*GGML_CALL graph_plan_free)   (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
    void                      (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);

    // compute graph without a plan (async)
    bool (*GGML_CALL graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);

    // check if the backend supports an operation
    bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
};
struct ggml_backend {
    struct ggml_backend_i  iface;
    ggml_backend_context_t context;
};
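
// Illustrative sketch (hypothetical): a supports_op callback for a backend
// that only accelerates matrix multiplication and element-wise addition; ops
// it rejects can then be assigned to another backend.

static bool GGML_CALL example_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    (void) backend;
    switch (op->op) {
        case GGML_OP_MUL_MAT:
        case GGML_OP_ADD:
            return true;
        default:
            return false;
    }
}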
//
// Backend registry
//
typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data);
GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);
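
// Illustrative usage (hypothetical names): a backend registers itself once at
// startup so it can be enumerated and constructed by name, e.g.:
//
//     static ggml_backend_t GGML_CALL example_backend_reg_init(const char * params, void * user_data) {
//         (void) params;
//         (void) user_data;
//         return example_backend_init(); // hypothetical constructor
//     }
//
//     ggml_backend_register("EXAMPLE", example_backend_reg_init, example_buffer_type(), NULL);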
#ifdef __cplusplus
}
#endif