1 parent b8c8dda commit cc06f11
llama.cpp
@@ -165,8 +165,8 @@ struct llama_layer {
 };
 
 struct llama_kv_cache {
-    struct ggml_tensor * k;
-    struct ggml_tensor * v;
+    struct ggml_tensor * k = NULL;
+    struct ggml_tensor * v = NULL;
 
     struct ggml_context * ctx = NULL;
@@ -180,8 +180,12 @@ struct llama_kv_cache {
     }
 
 #ifdef GGML_USE_CUBLAS
+        if (k) {
         ggml_cuda_free_data(k);
+        }
+        if (v) {
         ggml_cuda_free_data(v);
+        }
 #endif // GGML_USE_CUBLAS
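For reference, here is a minimal standalone sketch of the pattern this commit applies: pointer members are default-initialized to null so the destructor can safely skip freeing buffers that were never allocated. The names Cache and backend_free are hypothetical stand-ins, not part of llama.cpp; backend_free plays the role of ggml_cuda_free_data, under the assumption that it must only be handed a pointer that was actually allocated.

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for a backend-specific free such as ggml_cuda_free_data();
// assume it must only be called with a pointer that was actually allocated.
static void backend_free(void * ptr) {
    std::printf("freeing %p\n", ptr);
    std::free(ptr);
}

struct Cache {
    void * k = nullptr;   // default-initialized, mirroring `k = NULL` in the diff
    void * v = nullptr;

    ~Cache() {
        // Guard each free, mirroring the added `if (k) { ... }` checks.
        if (k) {
            backend_free(k);
        }
        if (v) {
            backend_free(v);
        }
    }
};

int main() {
    {
        Cache unused;              // never allocated: destructor frees nothing
        (void) unused;
    }
    {
        Cache c;
        c.k = std::malloc(16);     // only k allocated; v stays null and is skipped
    }
    return 0;
}

The effect of the two hunks taken together: a llama_kv_cache destroyed before its tensors are created no longer passes uninitialized pointers to the CUDA free routine.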