Skip to content

Commit 4caa64b

Browse files
committed
whisper : remove extra backend instance (huh?)
1 parent 2877b02 commit 4caa64b

File tree

1 file changed

+4
-15
lines changed

1 file changed

+4
-15
lines changed

whisper.cpp

Lines changed: 4 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -819,8 +819,6 @@ struct whisper_state {
819819

820820
whisper_decoder decoders[WHISPER_MAX_DECODERS];
821821

822-
ggml_backend_t backend = nullptr;
823-
824822
// ggml-alloc:
825823
// - stores meta info about the intermediate tensors into the `meta` buffers
826824
// - stores the actual tensor data into the `data` buffers
@@ -2240,7 +2238,7 @@ static bool whisper_encode_internal(
22402238
}
22412239

22422240
if (!whisper_encode_external(wstate)) {
2243-
if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
2241+
if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
22442242
return false;
22452243
}
22462244
} else {
@@ -2263,7 +2261,7 @@ static bool whisper_encode_internal(
22632261
return false;
22642262
}
22652263

2266-
if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
2264+
if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
22672265
return false;
22682266
}
22692267
}
@@ -2279,7 +2277,7 @@ static bool whisper_encode_internal(
22792277
return false;
22802278
}
22812279

2282-
if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
2280+
if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
22832281
return false;
22842282
}
22852283
}
@@ -2744,7 +2742,7 @@ static bool whisper_decode_internal(
27442742

27452743
logits = gf->nodes[gf->n_nodes - 1];
27462744

2747-
if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
2745+
if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
27482746
return false;
27492747
}
27502748
}
@@ -3191,13 +3189,6 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {
31913189

31923190
whisper_state * state = new whisper_state;
31933191

3194-
state->backend = whisper_backend_init(ctx->params);
3195-
if (!state->backend) {
3196-
WHISPER_LOG_ERROR("%s: whisper_backend_init() failed\n", __func__);
3197-
whisper_free_state(state);
3198-
return nullptr;
3199-
}
3200-
32013192
// at this point, we don't know yet how many decoders will be used, so we overallocate 3x ctx
32023193
// in theory, there can be a case where this is not enough, but in practice it should always be enough
32033194
const int factor = 3;
@@ -3623,8 +3614,6 @@ void whisper_free_state(struct whisper_state * state) {
36233614
ggml_gallocr_free(state->alloc_cross.alloc);
36243615
ggml_gallocr_free(state->alloc_decode.alloc);
36253616

3626-
ggml_backend_free(state->backend);
3627-
36283617
// [EXPERIMENTAL] Token-level timestamps with DTW
36293618
aheads_masks_free(state->aheads_masks);
36303619

0 commit comments

Comments
 (0)