mirror of https://github.com/ggerganov/whisper.cpp.git
whisper : remove ggml_repeat for conv bias + single backend
@@ -6725,7 +6725,6 @@ inline void ggml_cuda_op_im2col(
     const int64_t OH = is_2D ? dst->ne[2] : 1;
     const int64_t OW = dst->ne[1];

     im2col_f32_f16_cuda(src1_dd, (half*) dst_dd,
         OH, IW, IH, OW, IC, KH, KW, N,
         src1->nb[is_2D ? 3 : 2] / 4, // nb is byte offset, src is type float32
whisper.cpp
@@ -817,22 +817,9 @@ struct whisper_context {

     whisper_state * state = nullptr;

-    ggml_backend_t backend_cpu = nullptr;
-    ggml_backend_t backend_gpu = nullptr;
+    ggml_backend_t backend = nullptr;

     std::string path_model; // populated by whisper_init_from_file_with_params()
-
-    ggml_backend_t backend_kv() const {
-        return backend_gpu ? backend_gpu : backend_cpu;
-    }
-
-    ggml_backend_t backend_conv() const {
-        return backend_gpu ? backend_gpu : backend_cpu;
-    }
-
-    ggml_backend_t backend_main() const {
-        return backend_gpu ? backend_gpu : backend_cpu;
-    }
 };

 struct whisper_global {
@@ -1193,10 +1180,10 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         model.e_pe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_audio_state, n_audio_ctx);

         model.e_conv_1_w = ggml_new_tensor_3d(ctx, vtype, 3, n_mels, n_audio_state);
-        model.e_conv_1_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1, n_audio_state);
+        model.e_conv_1_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2*n_audio_ctx, n_audio_state);

         model.e_conv_2_w = ggml_new_tensor_3d(ctx, vtype, 3, n_audio_state, n_audio_state);
-        model.e_conv_2_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1, n_audio_state);
+        model.e_conv_2_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_audio_ctx, n_audio_state);

         model.e_ln_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state);
         model.e_ln_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state);
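The two conv bias tensors grow from a single row of n_audio_state values to a full time-by-channel plane: conv1 runs at stride 1 over the 2*n_audio_ctx (= 3000) mel frames, conv2 runs at stride 2 and therefore produces n_audio_ctx (= 1500) frames, and the new shapes match those outputs so the bias can later be added without a ggml_repeat node. A quick, hypothetical check of that arithmetic for the default 30-second configuration (not part of the commit):

#include <assert.h>

int main(void) {
    const int n_audio_ctx = 1500;           // encoder output frames (default hparams)
    const int n_frames    = 2*n_audio_ctx;  // 3000 mel frames for 30 s of audio
    const int conv1_out   = n_frames;       // stride-1 conv keeps the length
    const int conv2_out   = n_frames / 2;   // stride-2 conv halves it

    assert(conv1_out == 2*n_audio_ctx);     // e_conv_1_b: [2*n_audio_ctx, n_audio_state]
    assert(conv2_out ==   n_audio_ctx);     // e_conv_2_b: [  n_audio_ctx, n_audio_state]
    return 0;
}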
@@ -1392,26 +1379,22 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
 #endif

         if (backend_gpu) {
-            wctx.backend_gpu = backend_gpu;
+            wctx.backend = backend_gpu;
         } else {
-            wctx.backend_gpu = nullptr;
+            wctx.backend = ggml_backend_cpu_init();
         }
-
-        // always add the CPU backend as a fallback
-        wctx.backend_cpu = ggml_backend_cpu_init();
     }

     {
-        size_t size_conv = 0;
         size_t size_main = 0;

         for (const auto & t : model.tensors) {
             size_main += ggml_nbytes(t.second) + ggml_tensor_overhead();
         }

-        model.data->buffer_main = ggml_backend_alloc_buffer(wctx.backend_main(), size_main);
+        model.data->buffer_main = ggml_backend_alloc_buffer(wctx.backend, size_main);

-        WHISPER_LOG_INFO("%s: %8s buffer size = %8.2f MB\n", __func__, ggml_backend_name(wctx.backend_main()), size_main / 1024.0 / 1024.0);
+        WHISPER_LOG_INFO("%s: %8s buffer size = %8.2f MB\n", __func__, ggml_backend_name(wctx.backend), size_main / 1024.0 / 1024.0);
     }

     ggml_allocr * alloc_main = ggml_allocr_new_from_buffer(model.data->buffer_main);
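Taken together with the struct change above, the context now owns exactly one ggml backend: the GPU backend when one was created, otherwise a freshly initialized CPU backend, and the separate CPU fallback is gone. A minimal sketch of that selection (not the committed code; it only uses calls that appear in this diff):

#include "ggml-backend.h"

// Sketch: pick the single backend the context will use from here on.
// 'backend_gpu' stands for whatever the #ifdef'd CUDA/Metal initialization
// produced, possibly nullptr.
static ggml_backend_t whisper_pick_backend(ggml_backend_t backend_gpu) {
    return backend_gpu ? backend_gpu : ggml_backend_cpu_init();
}

All model weights are then placed in a single buffer allocated on that backend via ggml_backend_alloc_buffer, which is why the separate size_conv accounting disappears.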
@@ -1462,6 +1445,10 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         }

         auto tensor = model.tensors[name.data()];
+
+        const bool is_conv_bias = (name == "encoder.conv1.bias" || name == "encoder.conv2.bias");
+
+        if (!is_conv_bias) {
             if (ggml_nelements(tensor) != nelements) {
                 WHISPER_LOG_ERROR("%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                 WHISPER_LOG_ERROR("%s: shape: [%d, %d, %d], expected: [%d, %d, %d]\n",
@@ -1482,23 +1469,43 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
                         __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
                 return false;
             }
+        }

-        ggml_backend * backend = wctx.backend_main();
+        ggml_backend_t backend = wctx.backend;

         //printf("%s: [%5.5s] %s\n", __func__, ggml_backend_name(backend), name.c_str());

-        if (ggml_backend_is_cpu(backend)
+        if ((ggml_backend_is_cpu(backend)
 #ifdef GGML_USE_METAL
                 || ggml_backend_is_metal(backend)
 #endif
-                ) {
+                ) && !is_conv_bias) {
             // for the CPU and Metal backend, we can read directly into the tensor
             loader->read(loader->context, tensor->data, ggml_nbytes(tensor));
             BYTESWAP_TENSOR(tensor);
         } else {
             // read into a temporary buffer first, then copy to device memory
             read_buf.resize(ggml_nbytes(tensor));

+            // we repeat the 2 bias tensors along dim 0:
+            // [1, 512] -> [3000, 512] (conv1.bias)
+            // [1, 512] -> [1500, 512] (conv2.bias)
+            if (is_conv_bias) {
+                loader->read(loader->context, read_buf.data(), read_buf.size() / tensor->ne[0]);
+
+                float * data_f32 = (float *) read_buf.data();
+                for (int64_t y = 0; y < tensor->ne[1]; ++y) {
+                    const int64_t yy = tensor->ne[1] - y - 1;
+                    const float val = data_f32[yy];
+
+                    for (int64_t x = 0; x < tensor->ne[0]; ++x) {
+                        data_f32[yy*tensor->ne[0] + x] = val;
+                    }
+                }
+            } else {
                 loader->read(loader->context, read_buf.data(), read_buf.size());
+            }

             ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor));
         }
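The loader now performs the broadcast that ggml_repeat used to do at graph time: for the two conv biases it reads only the ne[1] real values (read_buf.size() / tensor->ne[0] bytes) and then fills every row of the [ne[0], ne[1]] destination with the corresponding channel value. The in-place loop walks the channels from the top down (yy = tensor->ne[1] - y - 1) so the not-yet-expanded source values at the start of the buffer are not overwritten. A standalone sketch of the same fill, with my own names and an out-of-place buffer for clarity (not the committed code):

#include <cstdint>
#include <vector>

// Sketch: replicate one bias value per channel across the time dimension,
// producing the pre-broadcast tensor that the graph can add directly.
// 'n_ctx' would be 2*n_audio_ctx (3000) for conv1.bias and n_audio_ctx (1500) for conv2.bias.
static std::vector<float> repeat_bias(const std::vector<float> & bias, int64_t n_ctx) {
    std::vector<float> out(n_ctx * (int64_t) bias.size());
    for (size_t c = 0; c < bias.size(); ++c) {   // channel index (tensor dim 1)
        for (int64_t t = 0; t < n_ctx; ++t) {    // time index    (tensor dim 0)
            out[c*n_ctx + t] = bias[c];
        }
    }
    return out;
}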
@@ -1597,6 +1604,7 @@ static struct ggml_cgraph * whisper_build_graph_conv(
     // convolution + gelu
     {
         cur = ggml_conv_1d_ph(ctx0, model.e_conv_1_w, mel, 1, 1);
+        //cur = ggml_add(ctx0, cur, model.e_conv_1_b);
         cur = ggml_add(ctx0,
                 ggml_repeat(ctx0,
                     model.e_conv_1_b,
@@ -1606,6 +1614,7 @@ static struct ggml_cgraph * whisper_build_graph_conv(
         cur = ggml_gelu(ctx0, cur);

         cur = ggml_conv_1d_ph(ctx0, model.e_conv_2_w, cur, 2, 1);
+        //cur = ggml_add(ctx0, cur, model.e_conv_2_b);
         cur = ggml_add(ctx0,
                 ggml_repeat(ctx0,
                     model.e_conv_2_b,
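The commented-out adds mark where the conv graph is headed: with the biases already materialized at their full [n_ctx, n_state] shape by the loader, the ggml_repeat node becomes redundant and the bias can be added in a single op. A hedged sketch of the simplified block, in the context of whisper_build_graph_conv (not yet what the committed graph builds, which still goes through ggml_repeat):

// Sketch: conv + bias + GELU once e_conv_1_b / e_conv_2_b are pre-broadcast.
cur = ggml_conv_1d_ph(ctx0, model.e_conv_1_w, mel, 1, 1);
cur = ggml_add(ctx0, cur, model.e_conv_1_b);   // no ggml_repeat needed
cur = ggml_gelu(ctx0, cur);

cur = ggml_conv_1d_ph(ctx0, model.e_conv_2_w, cur, 2, 1);
cur = ggml_add(ctx0, cur, model.e_conv_2_b);   // no ggml_repeat needed
cur = ggml_gelu(ctx0, cur);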
@@ -1669,6 +1678,14 @@ static struct ggml_cgraph * whisper_build_graph_encoder(

     ggml_allocr * alloc = wstate.alloc_encode.alloc;

+    //struct ggml_tensor * cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_ctx, n_state);
+    //ggml_allocr_alloc(alloc, cur);
+
+    //if (!ggml_allocr_is_measure(alloc)) {
+    //    ggml_backend_tensor_copy(wstate.embd_conv, cur);
+    //}
+    struct ggml_tensor * cur = ggml_view_tensor(ctx0, wstate.embd_conv);
+
     struct ggml_tensor * KQscale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
     ggml_allocr_alloc(alloc, KQscale);

@@ -1677,13 +1694,6 @@ static struct ggml_cgraph * whisper_build_graph_encoder(
         ggml_backend_tensor_set(KQscale, &val, 0, sizeof(float));
     }

-    struct ggml_tensor * cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_ctx, n_state);
-    ggml_allocr_alloc(alloc, cur);
-
-    if (!ggml_allocr_is_measure(alloc)) {
-        ggml_backend_tensor_copy(wstate.embd_conv, cur);
-    }
-
     // ===================================================================
     // NOTE: experimenting with partial evaluation of the encoder (ignore)
     //static int iter = -1;
@@ -1923,12 +1933,13 @@ static struct ggml_cgraph * whisper_build_graph_cross(

     ggml_allocr * alloc = wstate.alloc_cross.alloc;

-    struct ggml_tensor * cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx);
-    ggml_allocr_alloc(alloc, cur);
+    //struct ggml_tensor * cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx);
+    //ggml_allocr_alloc(alloc, cur);

-    if (!ggml_allocr_is_measure(alloc)) {
-        ggml_backend_tensor_copy(wstate.embd_enc, cur);
-    }
+    //if (!ggml_allocr_is_measure(alloc)) {
+    //    ggml_backend_tensor_copy(wstate.embd_enc, cur);
+    //}
+    struct ggml_tensor * cur = ggml_view_tensor(ctx0, wstate.embd_enc);

     struct ggml_tensor * Kscale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
     ggml_allocr_alloc(alloc, Kscale);
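Both graph builders stop allocating a fresh tensor and copying the conv / encoder output into it; they take a view instead. That is only safe because, after this commit, the conv, encoder and cross graphs all run on the same single backend, so wstate.embd_conv and wstate.embd_enc already live where the consuming graph executes and a zero-copy view is enough. A sketch of the pattern, using the same call as in the hunks above:

// Sketch: ggml_view_tensor(ctx, src) returns a new graph node backed by src's
// existing data, so no allocation and no ggml_backend_tensor_copy is needed.
struct ggml_tensor * cur = ggml_view_tensor(ctx0, wstate.embd_conv);   // encoder input
// ... the attention / mlp layers then operate on 'cur' as before ...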
@@ -2006,15 +2017,15 @@ static bool whisper_encode_internal(
         ggml_allocr_alloc_graph(alloc, gf);

         if (!whisper_encode_external(wstate)) {
-            if (ggml_backend_is_cpu(wctx.backend_conv())) {
-                ggml_backend_cpu_set_n_threads(wctx.backend_conv(), n_threads);
+            if (ggml_backend_is_cpu(wctx.backend)) {
+                ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
             }
 #ifdef GGML_USE_METAL
-            if (ggml_backend_is_metal(wctx.backend_conv())) {
-                ggml_backend_metal_set_n_cb(wctx.backend_conv(), n_threads);
+            if (ggml_backend_is_metal(wctx.backend)) {
+                ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
             }
 #endif
-            ggml_backend_graph_compute(wctx.backend_conv(), gf);
+            ggml_backend_graph_compute(wctx.backend, gf);
         }
     }

@@ -2028,15 +2039,15 @@ static bool whisper_encode_internal(

         ggml_allocr_alloc_graph(alloc, gf);

-        if (ggml_backend_is_cpu(wctx.backend_main())) {
-            ggml_backend_cpu_set_n_threads(wctx.backend_main(), n_threads);
+        if (ggml_backend_is_cpu(wctx.backend)) {
+            ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
         }
 #ifdef GGML_USE_METAL
-        if (ggml_backend_is_metal(wctx.backend_main())) {
-            ggml_backend_metal_set_n_cb(wctx.backend_main(), n_threads);
+        if (ggml_backend_is_metal(wctx.backend)) {
+            ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
         }
 #endif
-        ggml_backend_graph_compute(wctx.backend_main(), gf);
+        ggml_backend_graph_compute(wctx.backend, gf);
     }

     // cross
@@ -2049,15 +2060,15 @@ static bool whisper_encode_internal(

         ggml_allocr_alloc_graph(alloc, gf);

-        if (ggml_backend_is_cpu(wctx.backend_main())) {
-            ggml_backend_cpu_set_n_threads(wctx.backend_main(), n_threads);
+        if (ggml_backend_is_cpu(wctx.backend)) {
+            ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
         }
 #ifdef GGML_USE_METAL
-        if (ggml_backend_is_metal(wctx.backend_main())) {
-            ggml_backend_metal_set_n_cb(wctx.backend_main(), n_threads);
+        if (ggml_backend_is_metal(wctx.backend)) {
+            ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
         }
 #endif
-        ggml_backend_graph_compute(wctx.backend_main(), gf);
+        ggml_backend_graph_compute(wctx.backend, gf);
     }

     wstate.t_encode_us += ggml_time_us() - t_start_us;
@@ -2448,15 +2459,15 @@ static bool whisper_decode_internal(

         logits = gf->nodes[gf->n_nodes - 1];

-        if (ggml_backend_is_cpu(wctx.backend_main())) {
-            ggml_backend_cpu_set_n_threads(wctx.backend_main(), n_threads);
+        if (ggml_backend_is_cpu(wctx.backend)) {
+            ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
         }
 #ifdef GGML_USE_METAL
-        if (ggml_backend_is_metal(wctx.backend_main())) {
-            ggml_backend_metal_set_n_cb(wctx.backend_main(), n_threads);
+        if (ggml_backend_is_metal(wctx.backend)) {
+            ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
         }
 #endif
-        ggml_backend_graph_compute(wctx.backend_main(), gf);
+        ggml_backend_graph_compute(wctx.backend, gf);
     }

     // extract logits for all N tokens
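The same dispatch pattern is repeated for the conv, encoder, cross and decoder graphs: tune the single context backend (thread count for the CPU backend, command-buffer count for Metal) and then compute the graph on it. A sketch of that pattern factored into a helper; the function name is mine, whisper.cpp keeps the code inline at each call site:

#include "ggml-backend.h"
#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

// Sketch: per-graph compute on the single context backend.
static void compute_on_backend(ggml_backend_t backend, struct ggml_cgraph * gf, int n_threads) {
    if (ggml_backend_is_cpu(backend)) {
        ggml_backend_cpu_set_n_threads(backend, n_threads);   // CPU: worker threads
    }
#ifdef GGML_USE_METAL
    if (ggml_backend_is_metal(backend)) {
        ggml_backend_metal_set_n_cb(backend, n_threads);      // Metal: command buffers
    }
#endif
    ggml_backend_graph_compute(backend, gf);
}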
@@ -2899,7 +2910,7 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {

     whisper_state * state = new whisper_state;

-    if (!kv_cache_init(ctx->model.hparams, state->decoders[0].kv_self, ctx->backend_kv(), ctx->itype, ctx->model.hparams.n_text_ctx)) {
+    if (!kv_cache_init(ctx->model.hparams, state->decoders[0].kv_self, ctx->backend, ctx->itype, ctx->model.hparams.n_text_ctx)) {
         WHISPER_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
         delete state;
         return nullptr;
@@ -2910,7 +2921,7 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {
         WHISPER_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
     }

-    if (!kv_cache_init(ctx->model.hparams, state->kv_cross, ctx->backend_kv(), ctx->itype, ctx->model.hparams.n_audio_ctx)) {
+    if (!kv_cache_init(ctx->model.hparams, state->kv_cross, ctx->backend, ctx->itype, ctx->model.hparams.n_audio_ctx)) {
         WHISPER_LOG_ERROR("%s: kv_cache_init() failed for cross-attention cache\n", __func__);
         delete state;
         return nullptr;
@@ -2952,7 +2963,7 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {

     // conv allocator
     {
-        whisper_allocr_graph_init(state->alloc_conv, ctx->backend_conv(),
+        whisper_allocr_graph_init(state->alloc_conv, ctx->backend,
                 [&]() {
                     return whisper_build_graph_conv(*ctx, *state, 0);
                 });
@@ -2962,7 +2973,7 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {

     // encoder allocator
     if (!whisper_encode_external(*state)) {
-        whisper_allocr_graph_init(state->alloc_encode, ctx->backend_main(),
+        whisper_allocr_graph_init(state->alloc_encode, ctx->backend,
                 [&]() {
                     return whisper_build_graph_encoder(*ctx, *state);
                 });
@@ -2972,7 +2983,7 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {

     // cross allocator
     {
-        whisper_allocr_graph_init(state->alloc_cross, ctx->backend_main(),
+        whisper_allocr_graph_init(state->alloc_cross, ctx->backend,
                 [&]() {
                     return whisper_build_graph_cross(*ctx, *state);
                 });
@@ -2982,7 +2993,7 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {

     // decoder allocator
     {
-        whisper_allocr_graph_init(state->alloc_decode, ctx->backend_main(),
+        whisper_allocr_graph_init(state->alloc_decode, ctx->backend,
                 [&]() {
                     const auto & hparams = ctx->model.hparams;

@@ -3264,11 +3275,7 @@ void whisper_free(struct whisper_context * ctx) {

         whisper_free_state(ctx->state);

-        ggml_backend_free(ctx->backend_cpu);
+        ggml_backend_free(ctx->backend);

-        if (ctx->backend_gpu) {
-            ggml_backend_free(ctx->backend_gpu);
-        }
-
         delete ctx;
     }
@@ -4566,7 +4573,7 @@ int whisper_full_with_state(

             if (decoder.kv_self.ctx == nullptr) {
                 decoder.kv_self = state->decoders[0].kv_self;
-                if (!kv_cache_reinit(decoder.kv_self, ctx->backend_kv())) {
+                if (!kv_cache_reinit(decoder.kv_self, ctx->backend)) {
                     WHISPER_LOG_ERROR("%s: kv_cache_reinit() failed for self-attention, decoder %d\n", __func__, j);
                     return -4;
                 }