Commit 1eb69c0

whisper : remove whisper_load_backends function
This commit removes the `whisper_load_backends` function, which was used to load all GGML backends. The motivation for this change is to push the responsibility of loading backends to user applications, giving them more control over which backends to load and when. See the references below for more context.

Resolves: #3182
Refs: #3042 (comment)
Refs: #3042 (comment)
1 parent 1f5fdbe commit 1eb69c0
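
For downstream applications, the practical consequence is that GGML backends must now be loaded explicitly before a whisper context is created, as each of the example programs below now does in `main`. A minimal sketch of an affected caller, assuming a placeholder model path and omitting real error handling and transcription calls:

#include "ggml-backend.h"
#include "whisper.h"

int main() {
    // Previously loaded lazily inside libwhisper by whisper_load_backends();
    // after this commit the application must do it itself.
    ggml_backend_load_all();

    struct whisper_context_params cparams = whisper_context_default_params();

    // "ggml-base.en.bin" is a hypothetical model path, not part of this commit.
    struct whisper_context * ctx = whisper_init_from_file_with_params("ggml-base.en.bin", cparams);
    if (ctx == nullptr) {
        return 1;
    }

    // ... run transcription as before ...

    whisper_free(ctx);
    return 0;
}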

11 files changed, +21 -16 lines changed

examples/bench/bench.cpp

Lines changed: 2 additions & 0 deletions
@@ -156,6 +156,8 @@ static int whisper_bench_full(const whisper_params & params) {
 }
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     whisper_params params;
 
     if (whisper_params_parse(argc, argv, params) == false) {

examples/cli/cli.cpp

Lines changed: 2 additions & 1 deletion
@@ -909,6 +909,8 @@ static void output_lrc(struct whisper_context * ctx, std::ofstream & fout, const
 static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
 #if defined(_WIN32)
     // Set the console output code page to UTF-8, while command line arguments
     // are still encoded in the system's code page. In this way, we can print
@@ -988,7 +990,6 @@ int main(int argc, char ** argv) {
     }
 
     // whisper init
-
     struct whisper_context_params cparams = whisper_context_default_params();
 
     cparams.use_gpu = params.use_gpu;

examples/command/command.cpp

Lines changed: 2 additions & 0 deletions
@@ -678,6 +678,8 @@ static int process_general_transcription(struct whisper_context * ctx, audio_asy
 }
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     whisper_params params;
 
     if (whisper_params_parse(argc, argv, params) == false) {

examples/lsp/lsp.cpp

Lines changed: 2 additions & 0 deletions
@@ -424,6 +424,8 @@ static void process_loop(struct whisper_context * ctx, audio_async &audio, const
 }
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     whisper_params params;
     if (whisper_params_parse(argc, argv, params) == false) {
         return 1;

examples/quantize/quantize.cpp

Lines changed: 3 additions & 0 deletions
@@ -1,4 +1,5 @@
 #include "ggml.h"
+#include "ggml-backend.h"
 
 #include "common.h"
 #include "common-ggml.h"
@@ -176,6 +177,8 @@ static bool whisper_model_quantize(const std::string & fname_inp, const std::str
 }
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         ggml_print_ftypes(stderr);

examples/server/server.cpp

Lines changed: 2 additions & 0 deletions
@@ -516,6 +516,8 @@ void get_req_parameters(const Request & req, whisper_params & params)
 } // namespace
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     whisper_params params;
     server_params sparams;

examples/stream/stream.cpp

Lines changed: 2 additions & 0 deletions
@@ -116,6 +116,8 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
 }
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     whisper_params params;
 
     if (whisper_params_parse(argc, argv, params) == false) {

examples/talk-llama/talk-llama.cpp

Lines changed: 2 additions & 0 deletions
@@ -291,6 +291,8 @@ The transcript only includes text, it does not include markup like HTML and Mark
 {0}{4})";
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     whisper_params params;
 
     if (whisper_params_parse(argc, argv, params) == false) {

examples/vad-speech-segments/speech.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,8 @@ static bool vad_params_parse(int argc, char ** argv, cli_params & params) {
8383
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
8484

8585
int main(int argc, char ** argv) {
86+
ggml_backend_load_all();
87+
8688
cli_params cli_params;
8789

8890
if (!vad_params_parse(argc, argv, cli_params)) {

examples/wchess/wchess.cmd/wchess.cmd.cpp

Lines changed: 2 additions & 0 deletions
@@ -168,6 +168,8 @@ bool get_audio(std::vector<float> & pcmf32_cur) {
 }
 
 int main(int argc, char ** argv) {
+    ggml_backend_load_all();
+
     whisper_params params;
 
     if (whisper_params_parse(argc, argv, params) == false) {

src/whisper.cpp

Lines changed: 0 additions & 15 deletions
@@ -206,15 +206,6 @@ static bool ggml_graph_compute_helper(
     return t;
 }
 
-static void whisper_load_backends() {
-#ifdef GGML_BACKEND_DL
-    static std::once_flag flag;
-    std::call_once(flag, []() {
-        ggml_backend_load_all();
-    });
-#endif
-}
-
 // TODO: move these functions to ggml-base with support for ggml-backend?
 
 static ggml_tensor * whisper_set_f32(struct ggml_tensor * t, float v) {
@@ -1322,8 +1313,6 @@ static size_t aheads_masks_nbytes(struct whisper_aheads_masks & aheads_masks) {
 static ggml_backend_t whisper_backend_init_gpu(const whisper_context_params & params) {
     ggml_log_set(g_state.log_callback, g_state.log_callback_user_data);
 
-    whisper_load_backends();
-
     ggml_backend_dev_t dev = nullptr;
 
     int cnt = 0;
@@ -4335,8 +4324,6 @@ static int whisper_has_openvino(void) {
 const char * whisper_print_system_info(void) {
     static std::string s;
 
-    whisper_load_backends();
-
     s = "";
     s += "WHISPER : ";
     s += "COREML = " + std::to_string(whisper_has_coreml()) + " | ";
@@ -8154,8 +8141,6 @@ WHISPER_API int whisper_bench_ggml_mul_mat(int n_threads) {
 }
 
 WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads) {
-    whisper_load_backends();
-
     static std::string s;
     s = "";
     char strbuf[256];

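Applications that want to keep the once-only loading behaviour of the removed helper can reproduce it on their own side. The following sketch mirrors the deleted code above; the helper name is hypothetical and not part of the library, and the GGML_BACKEND_DL guard only applies when ggml is built with dynamic backend loading:

#include <mutex>

#include "ggml-backend.h"

// Application-side equivalent of the removed whisper_load_backends():
// load all dynamically discoverable GGML backends at most once, regardless
// of how many times this helper is called.
static void app_load_backends_once() {
#ifdef GGML_BACKEND_DL
    static std::once_flag flag;
    std::call_once(flag, []() {
        ggml_backend_load_all();
    });
#endif
}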