Skip to content

Commit d47379b

Browse files
peter277
authored and NeoZhangJianyu committed
examples, ggml : fix GCC compiler warnings (ggml-org#10983)
Warning types fixed (observed under MSYS2 GCC 14.2.0): * format '%ld' expects argument of type 'long int', but argument has type 'size_t' * llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp:81:46: warning: missing initializer for member '_STARTUPINFOA::lpDesktop' [-Wmissing-field-initializers] (emitted for all struct fields except the first)
1 parent ae502e2 commit d47379b

File tree

4 files changed

+7
-6
lines changed

4 files changed

+7
-6
lines changed

examples/cvector-generator/mean.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ static void run(
1515
for (size_t il = 0; il < v_input.size(); ++il) {
1616
// prepare output vector
1717
struct ggml_tensor * ctrl_out = v_output[il];
18-
ggml_format_name(ctrl_out, "direction.%ld", il+1);
18+
ggml_format_name(ctrl_out, "direction.%zu", il+1);
1919

2020
// calculate mean vector
2121
struct ggml_tensor * t_layer = v_input[il];

examples/cvector-generator/pca.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -302,7 +302,7 @@ static void run_pca(
302302

303303
// prepare output vector
304304
struct ggml_tensor * ctrl_out = v_output[il];
305-
ggml_format_name(ctrl_out, "direction.%ld", il+1);
305+
ggml_format_name(ctrl_out, "direction.%zu", il+1);
306306

307307
// run power_iteration
308308
params.i_layer = il;

examples/export-lora/export-lora.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -265,8 +265,8 @@ struct lora_merge_ctx {
265265
fout.write((const char *)data.data(), data.size());
266266
}
267267

268-
printf("%s : merged %ld tensors with lora adapters\n", __func__, n_merged);
269-
printf("%s : wrote %ld tensors to output file\n", __func__, trans.size());
268+
printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged);
269+
printf("%s : wrote %zu tensors to output file\n", __func__, trans.size());
270270
}
271271

272272
void copy_tensor(struct ggml_tensor * base) {
@@ -352,7 +352,7 @@ struct lora_merge_ctx {
352352
const float scale = alpha ? adapters[i]->scale * alpha / rank : adapters[i]->scale;
353353
delta = ggml_scale(ctx0, delta, scale);
354354
cur = ggml_add(ctx0, delta, cur);
355-
printf("%s : + merging from adapter[%ld] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
355+
printf("%s : + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
356356
printf("%s : input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]);
357357
}
358358
cur = ggml_cast(ctx0, cur, out->type);

ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,8 @@ void execute_command(const std::string& command, std::string& stdout_str, std::s
7878
}
7979

8080
PROCESS_INFORMATION pi;
81-
STARTUPINFOA si = { sizeof(STARTUPINFOA) };
81+
STARTUPINFOA si = {};
82+
si.cb = sizeof(STARTUPINFOA);
8283
si.dwFlags = STARTF_USESTDHANDLES;
8384
si.hStdOutput = stdout_write;
8485
si.hStdError = stderr_write;

0 commit comments

Comments (0)