whisper : remove whisper_load_backends function (#3196)

* whisper : remove whisper_load_backends function

This commit removes the `whisper_load_backends` function, which was used
to load all GGML backends.

The motivation for this change is to push the responsibility of loading
backends to user applications to give them more control over which
backends to load and when. See the references below for more context.

Resolves: https://github.com/ggml-org/whisper.cpp/issues/3182
Refs: https://github.com/ggml-org/whisper.cpp/pull/3042#issuecomment-2801778733
Refs: https://github.com/ggml-org/whisper.cpp/pull/3042#issuecomment-2801928990

* ruby : add check for rwc is NULL

This commit adds a check to ensure that the `rwc` pointer is not NULL
before attempting to mark its members in the garbage collector.

The motivation for this is to see whether it fixes the CI build, as I'm
not able to reproduce the issue locally.

Refs: https://github.com/ggml-org/whisper.cpp/actions/runs/15299612277/job/43036694928?pr=3196
This commit is contained in:
Daniel Bevenius 2025-05-29 08:03:17 +02:00 committed by GitHub
parent 1f5fdbecb4
commit 73a8c5fb94
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
12 changed files with 23 additions and 16 deletions

View File

@ -77,6 +77,8 @@ static ID id_vad_params;
static void
rb_whisper_callbcack_container_mark(ruby_whisper_callback_container *rwc)
{
if (rwc == NULL) return;
rb_gc_mark(rwc->user_data);
rb_gc_mark(rwc->callback);
rb_gc_mark(rwc->callbacks);

View File

@ -156,6 +156,8 @@ static int whisper_bench_full(const whisper_params & params) {
}
int main(int argc, char ** argv) {
ggml_backend_load_all();
whisper_params params;
if (whisper_params_parse(argc, argv, params) == false) {

View File

@ -909,6 +909,8 @@ static void output_lrc(struct whisper_context * ctx, std::ofstream & fout, const
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
int main(int argc, char ** argv) {
ggml_backend_load_all();
#if defined(_WIN32)
// Set the console output code page to UTF-8, while command line arguments
// are still encoded in the system's code page. In this way, we can print
@ -988,7 +990,6 @@ int main(int argc, char ** argv) {
}
// whisper init
struct whisper_context_params cparams = whisper_context_default_params();
cparams.use_gpu = params.use_gpu;

View File

@ -678,6 +678,8 @@ static int process_general_transcription(struct whisper_context * ctx, audio_asy
}
int main(int argc, char ** argv) {
ggml_backend_load_all();
whisper_params params;
if (whisper_params_parse(argc, argv, params) == false) {

View File

@ -424,6 +424,8 @@ static void process_loop(struct whisper_context * ctx, audio_async &audio, const
}
int main(int argc, char ** argv) {
ggml_backend_load_all();
whisper_params params;
if (whisper_params_parse(argc, argv, params) == false) {
return 1;

View File

@ -1,4 +1,5 @@
#include "ggml.h"
#include "ggml-backend.h"
#include "common.h"
#include "common-ggml.h"
@ -176,6 +177,8 @@ static bool whisper_model_quantize(const std::string & fname_inp, const std::str
}
int main(int argc, char ** argv) {
ggml_backend_load_all();
if (argc != 4) {
fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
ggml_print_ftypes(stderr);

View File

@ -516,6 +516,8 @@ void get_req_parameters(const Request & req, whisper_params & params)
} // namespace
int main(int argc, char ** argv) {
ggml_backend_load_all();
whisper_params params;
server_params sparams;

View File

@ -116,6 +116,8 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
}
int main(int argc, char ** argv) {
ggml_backend_load_all();
whisper_params params;
if (whisper_params_parse(argc, argv, params) == false) {

View File

@ -291,6 +291,8 @@ The transcript only includes text, it does not include markup like HTML and Mark
{0}{4})";
int main(int argc, char ** argv) {
ggml_backend_load_all();
whisper_params params;
if (whisper_params_parse(argc, argv, params) == false) {

View File

@ -83,6 +83,8 @@ static bool vad_params_parse(int argc, char ** argv, cli_params & params) {
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
int main(int argc, char ** argv) {
ggml_backend_load_all();
cli_params cli_params;
if (!vad_params_parse(argc, argv, cli_params)) {

View File

@ -168,6 +168,8 @@ bool get_audio(std::vector<float> & pcmf32_cur) {
}
int main(int argc, char ** argv) {
ggml_backend_load_all();
whisper_params params;
if (whisper_params_parse(argc, argv, params) == false) {

View File

@ -206,15 +206,6 @@ static bool ggml_graph_compute_helper(
return t;
}
static void whisper_load_backends() {
#ifdef GGML_BACKEND_DL
static std::once_flag flag;
std::call_once(flag, []() {
ggml_backend_load_all();
});
#endif
}
// TODO: move these functions to ggml-base with support for ggml-backend?
static ggml_tensor * whisper_set_f32(struct ggml_tensor * t, float v) {
@ -1322,8 +1313,6 @@ static size_t aheads_masks_nbytes(struct whisper_aheads_masks & aheads_masks) {
static ggml_backend_t whisper_backend_init_gpu(const whisper_context_params & params) {
ggml_log_set(g_state.log_callback, g_state.log_callback_user_data);
whisper_load_backends();
ggml_backend_dev_t dev = nullptr;
int cnt = 0;
@ -4335,8 +4324,6 @@ static int whisper_has_openvino(void) {
const char * whisper_print_system_info(void) {
static std::string s;
whisper_load_backends();
s = "";
s += "WHISPER : ";
s += "COREML = " + std::to_string(whisper_has_coreml()) + " | ";
@ -8154,8 +8141,6 @@ WHISPER_API int whisper_bench_ggml_mul_mat(int n_threads) {
}
WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads) {
whisper_load_backends();
static std::string s;
s = "";
char strbuf[256];