
Commit 4b7e245

minor : fix compile warnings

1 parent 5ea4339

Showing 4 changed files with 12 additions and 10 deletions.

examples/common.cpp

Lines changed: 2 additions & 2 deletions
@@ -749,7 +749,7 @@ bool console_readline(console_state & con_st, std::string & line) {
                 break;
             }

-            if (input_char == WEOF || input_char == 0x04 /* Ctrl+D*/) {
+            if (input_char == (char32_t) WEOF || input_char == 0x04 /* Ctrl+D*/) {
                 end_of_stream = true;
                 break;
             }
@@ -764,7 +764,7 @@ bool console_readline(console_state & con_st, std::string & line) {
             char32_t code = getchar32();
             if (code == '[' || code == 0x1B) {
                 // Discard the rest of the escape sequence
-                while ((code = getchar32()) != WEOF) {
+                while ((code = getchar32()) != (char32_t) WEOF) {
                     if ((code >= 'A' && code <= 'Z') || (code >= 'a' && code <= 'z') || code == '~') {
                         break;
                     }
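Why the cast matters: getchar32() yields a char32_t, while WEOF is a wint_t constant, and on toolchains where wint_t promotes to a signed int the comparison mixes signed and unsigned operands, which compilers flag (e.g. with -Wsign-compare). Below is a minimal standalone sketch of the pattern; the file name and values are invented for illustration, not code from the repository:

// weof_compare.cpp - standalone sketch of the warning pattern fixed above;
// illustrative only, not code from llama.cpp.
#include <cwchar>   // WEOF, wint_t
#include <cstdio>

int main() {
    char32_t code = U'A';  // stands in for the value returned by getchar32()

    // Unsuffixed comparison: WEOF has type wint_t, and where wint_t promotes
    // to a signed int this mixes signed and unsigned operands, which
    // compilers may flag (e.g. -Wsign-compare):
    //     if (code == WEOF) { ... }

    // The fix casts WEOF to char32_t so both operands share one unsigned
    // type and the comparison compiles without a warning:
    if (code == (char32_t) WEOF) {
        std::printf("end of stream\n");
    }
    return 0;
}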

examples/common.h

Lines changed: 3 additions & 3 deletions
@@ -44,15 +44,15 @@ struct gpt_params {
     float mirostat_tau = 5.00f; // target entropy
     float mirostat_eta = 0.10f; // learning rate

-    std::string model = "models/7B/ggml-model.bin"; // model path
-    std::string prompt = "";
+    std::string model             = "models/7B/ggml-model.bin"; // model path
+    std::string prompt            = "";
     std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
     std::string input_prefix      = ""; // string to prefix user inputs with
     std::string input_suffix      = ""; // string to suffix user inputs with
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted

     std::string lora_adapter = ""; // lora adapter path
-    std::string lora_base = ""; // base model path for the lora adapter
+    std::string lora_base    = ""; // base model path for the lora adapter

     bool memory_f16 = true; // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided

llama.cpp

Lines changed: 1 addition & 1 deletion
@@ -941,7 +941,7 @@ static void llama_model_load_internal(
     size_t ctx_size;
     size_t mmapped_size;
     ml->calc_sizes(&ctx_size, &mmapped_size);
-    fprintf(stderr, "%s: ggml ctx size = %6.2f KB\n", __func__, ctx_size/1024.0);
+    fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/1024.0/1024.0);

     // print memory requirements
     {
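The logging fix is simple arithmetic: one division by 1024 yields kilobytes, two divisions yield megabytes, and the format-string label now matches the math. A minimal sketch with an invented byte count:

// ctx_size_print.cpp - sketch of the unit fix; the byte count is made up
// for illustration and is not a real llama.cpp measurement.
#include <cstdio>

int main() {
    size_t ctx_size = 59u * 1024u * 1024u;  // pretend ggml context size in bytes

    // Old line: one division -> value in KB, which is unwieldy for
    // multi-megabyte contexts.
    std::fprintf(stderr, "ggml ctx size = %6.2f KB\n", ctx_size / 1024.0);

    // New line: two divisions -> value in MB, label updated to match.
    std::fprintf(stderr, "ggml ctx size = %6.2f MB\n", ctx_size / 1024.0 / 1024.0);
    return 0;
}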

tests/test-sampling.cpp

Lines changed: 6 additions & 4 deletions
@@ -1,14 +1,16 @@
-#include "llama.h"
 #include "ggml.h"
-#include <cassert>
-#include <cmath>
+#include "llama.h"
+
+#ifdef NDEBUG
+#undef NDEBUG
+#endif
+
 #include <numeric>
 #include <cassert>
 #include <iostream>
 #include <vector>
 #include <algorithm>

-
 void dump(const llama_token_data_array * candidates) {
     for (size_t i = 0; i < candidates->size; i++) {
         printf("%d: %f (%f)\n", candidates->data[i].id, candidates->data[i].p, candidates->data[i].logit);
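The new preprocessor block exists because assert() from <cassert> expands to a no-op whenever NDEBUG is defined, as release builds typically do (e.g. via -DNDEBUG); undefining NDEBUG before the header is included keeps the test's assertions active. A standalone sketch of the idiom, with an invented file name:

// assert_in_release.cpp - sketch of the idiom used in tests/test-sampling.cpp:
// force assert() to stay active even when the build defines NDEBUG.
#ifdef NDEBUG
#undef NDEBUG   // must happen before <cassert> expands the assert macro
#endif

#include <cassert>
#include <cstdio>

int main() {
    // Executes and aborts on failure even under -DNDEBUG, because NDEBUG
    // was undefined at the point <cassert> was included.
    assert(2 + 2 == 4);
    std::printf("assertion passed\n");
    return 0;
}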
