
whisper : remove whisper_load_backends function #3196


Merged: 2 commits, May 29, 2025
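This PR removes the internal whisper_load_backends() helper, which wrapped ggml_backend_load_all() in a std::call_once behind GGML_BACKEND_DL, and instead has each example call ggml_backend_load_all() explicitly at the start of main(). A minimal sketch of the resulting pattern for a standalone application follows; the model path and the placeholder transcription step are illustrative assumptions, not part of this diff.

#include "ggml-backend.h"
#include "whisper.h"

int main(void) {
    // Load all available ggml backends up front. With whisper_load_backends()
    // removed, libwhisper no longer does this implicitly when built with GGML_BACKEND_DL.
    ggml_backend_load_all();

    struct whisper_context_params cparams = whisper_context_default_params();
    struct whisper_context * ctx =
        whisper_init_from_file_with_params("models/ggml-base.en.bin", cparams); // hypothetical model path

    if (ctx == NULL) {
        return 1;
    }

    // ... run whisper_full() or other API calls here ...

    whisper_free(ctx);
    return 0;
}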
2 changes: 2 additions & 0 deletions bindings/ruby/ext/ruby_whisper_params.c
@@ -77,6 +77,8 @@ static ID id_vad_params;
static void
rb_whisper_callbcack_container_mark(ruby_whisper_callback_container *rwc)
{
if (rwc == NULL) return;

rb_gc_mark(rwc->user_data);
rb_gc_mark(rwc->callback);
rb_gc_mark(rwc->callbacks);
2 changes: 2 additions & 0 deletions examples/bench/bench.cpp
@@ -156,6 +156,8 @@ static int whisper_bench_full(const whisper_params & params) {
}

int main(int argc, char ** argv) {
ggml_backend_load_all();

whisper_params params;

if (whisper_params_parse(argc, argv, params) == false) {
3 changes: 2 additions & 1 deletion examples/cli/cli.cpp
@@ -909,6 +909,8 @@ static void output_lrc(struct whisper_context * ctx, std::ofstream & fout, const
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }

int main(int argc, char ** argv) {
ggml_backend_load_all();

#if defined(_WIN32)
// Set the console output code page to UTF-8, while command line arguments
// are still encoded in the system's code page. In this way, we can print
@@ -988,7 +990,6 @@ int main(int argc, char ** argv) {
}

// whisper init

struct whisper_context_params cparams = whisper_context_default_params();

cparams.use_gpu = params.use_gpu;
2 changes: 2 additions & 0 deletions examples/command/command.cpp
@@ -678,6 +678,8 @@ static int process_general_transcription(struct whisper_context * ctx, audio_asy
}

int main(int argc, char ** argv) {
ggml_backend_load_all();

whisper_params params;

if (whisper_params_parse(argc, argv, params) == false) {
2 changes: 2 additions & 0 deletions examples/lsp/lsp.cpp
@@ -424,6 +424,8 @@ static void process_loop(struct whisper_context * ctx, audio_async &audio, const
}

int main(int argc, char ** argv) {
ggml_backend_load_all();

whisper_params params;
if (whisper_params_parse(argc, argv, params) == false) {
return 1;
3 changes: 3 additions & 0 deletions examples/quantize/quantize.cpp
@@ -1,4 +1,5 @@
#include "ggml.h"
#include "ggml-backend.h"

#include "common.h"
#include "common-ggml.h"
@@ -176,6 +177,8 @@ static bool whisper_model_quantize(const std::string & fname_inp, const std::str
}

int main(int argc, char ** argv) {
ggml_backend_load_all();

if (argc != 4) {
fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
ggml_print_ftypes(stderr);
2 changes: 2 additions & 0 deletions examples/server/server.cpp
@@ -516,6 +516,8 @@ void get_req_parameters(const Request & req, whisper_params & params)
} // namespace

int main(int argc, char ** argv) {
ggml_backend_load_all();

whisper_params params;
server_params sparams;

2 changes: 2 additions & 0 deletions examples/stream/stream.cpp
@@ -116,6 +116,8 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
}

int main(int argc, char ** argv) {
ggml_backend_load_all();

whisper_params params;

if (whisper_params_parse(argc, argv, params) == false) {
2 changes: 2 additions & 0 deletions examples/talk-llama/talk-llama.cpp
@@ -291,6 +291,8 @@ The transcript only includes text, it does not include markup like HTML and Mark
{0}{4})";

int main(int argc, char ** argv) {
ggml_backend_load_all();

whisper_params params;

if (whisper_params_parse(argc, argv, params) == false) {
2 changes: 2 additions & 0 deletions examples/vad-speech-segments/speech.cpp
@@ -83,6 +83,8 @@ static bool vad_params_parse(int argc, char ** argv, cli_params & params) {
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }

int main(int argc, char ** argv) {
ggml_backend_load_all();

cli_params cli_params;

if (!vad_params_parse(argc, argv, cli_params)) {
2 changes: 2 additions & 0 deletions examples/wchess/wchess.cmd/wchess.cmd.cpp
@@ -168,6 +168,8 @@ bool get_audio(std::vector<float> & pcmf32_cur) {
}

int main(int argc, char ** argv) {
ggml_backend_load_all();

whisper_params params;

if (whisper_params_parse(argc, argv, params) == false) {
15 changes: 0 additions & 15 deletions src/whisper.cpp
@@ -206,15 +206,6 @@ static bool ggml_graph_compute_helper(
return t;
}

static void whisper_load_backends() {
#ifdef GGML_BACKEND_DL
static std::once_flag flag;
std::call_once(flag, []() {
ggml_backend_load_all();
});
#endif
}

// TODO: move these functions to ggml-base with support for ggml-backend?

static ggml_tensor * whisper_set_f32(struct ggml_tensor * t, float v) {
@@ -1322,8 +1313,6 @@ static size_t aheads_masks_nbytes(struct whisper_aheads_masks & aheads_masks) {
static ggml_backend_t whisper_backend_init_gpu(const whisper_context_params & params) {
ggml_log_set(g_state.log_callback, g_state.log_callback_user_data);

whisper_load_backends();

ggml_backend_dev_t dev = nullptr;

int cnt = 0;
@@ -4335,8 +4324,6 @@ static int whisper_has_openvino(void) {
const char * whisper_print_system_info(void) {
static std::string s;

whisper_load_backends();

s = "";
s += "WHISPER : ";
s += "COREML = " + std::to_string(whisper_has_coreml()) + " | ";
@@ -8154,8 +8141,6 @@ WHISPER_API int whisper_bench_ggml_mul_mat(int n_threads) {
}

WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads) {
whisper_load_backends();

static std::string s;
s = "";
char strbuf[256];