Skip to content

Commit b942fe3

Browse files
Windows test
1 parent 38e3148 commit b942fe3

File tree

1 file changed

+10
-0
lines changed

1 file changed

+10
-0
lines changed

llama.cpp

Lines changed: 10 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -1481,6 +1481,7 @@ static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, co
1481 1481

1482 1482
void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
1483 1483
assert(candidates->size > 0);
1484+
printf("llama_sample_softmax\n");
1484 1485

1485 1486
const int64_t t_start_sample_us = ggml_time_us();
1486 1487

@@ -1491,15 +1492,24 @@ void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * c
1491 1492
});
1492 1493
candidates->sorted = true;
1493 1494
}
1495+
printf("llama_sample_softmax 2\n");
1494 1496

1495 1497
float max_l = candidates->data[0].logit;
1498+
printf("max_l = %f\n", max_l);
1499+
fflush(stdout);
1496 1500
float cum_sum = 0.0f;
1497 1501
for (size_t i = 0; i < candidates->size; ++i) {
1502+
printf("i = %d, logit = %f\n", i, candidates->data[i].logit);
1503+
fflush(stdout);
1498 1504
float p = expf(candidates->data[i].logit - max_l);
1499 1505
candidates->data[i].p = p;
1500 1506
cum_sum += p;
1501 1507
}
1508+
printf("cum_sum = %f\n", cum_sum);
1509+
fflush(stdout);
1502 1510
for (size_t i = 0; i < candidates->size; ++i) {
1511+
printf("i = %d, p = %f\n", i, candidates->data[i].logit);
1512+
fflush(stdout);
1503 1513
candidates->data[i].p /= cum_sum;
1504 1514
}
1505 1515

0 commit comments

Comments (0)