Commit d0e10bf

server : more n_past fixes
1 parent 8772d3e commit d0e10bf

File tree

2 files changed: 61 additions & 62 deletions

examples/server/chat.sh

Lines changed: 1 addition & 0 deletions
@@ -48,6 +48,7 @@ chat_completion() {
         top_p: 0.9,
         n_keep: $n_keep,
         n_predict: 256,
+        cache_prompt: true,
         stop: ["\n### Human:"],
         stream: true
     }')"

examples/server/server.cpp

Lines changed: 60 additions & 62 deletions
@@ -185,7 +185,7 @@ struct llama_client_slot
     llama_sampling_context *ctx_sampling = nullptr;
 
     int32_t ga_i = 0;   // group-attention state
-    int32_t ga_n = 1;// group-attention factor
+    int32_t ga_n = 1;   // group-attention factor
     int32_t ga_w = 512; // group-attention width
 
     // multimodal

@@ -1293,6 +1293,7 @@ struct llama_server_context
         for (llama_client_slot &slot : slots)
         {
             slot.cache_tokens.clear();
+            slot.n_past = 0;
         }
     }
 
@@ -1429,7 +1430,6 @@ struct llama_server_context
             // TODO: we always have to take into account the "system_tokens"
             //       this is not great and needs to be improved somehow
             llama_batch_add(batch, slot.sampled, system_tokens.size() + slot.n_past, { slot.id }, true);
-
             slot.n_past += 1;
         }
 
@@ -1540,25 +1540,6 @@ struct llama_server_context
                     slot.n_past = common_part(slot.cache_tokens, prompt_tokens);
                     slot.num_prompt_tokens_processed = slot.num_prompt_tokens - slot.n_past;
 
-                    if (slot.ga_n != 1)
-                    {
-                        int ga_i = 0;
-                        int32_t ga_n = slot.ga_n;
-                        int32_t ga_w = slot.ga_w;
-                        int32_t slot_npast = 0;
-                        for (int k = 0; k < slot.n_past; ++k)
-                        {
-                            while (slot_npast >= ga_i + ga_w) {
-                                const int bd = (ga_w/ga_n)*(ga_n - 1);
-                                slot_npast -= bd;
-                                ga_i += ga_w/ga_n;
-                            }
-                            slot_npast++;
-                        }
-                        slot.n_past = slot_npast;
-                        slot.ga_i = ga_i;
-                    }
-
                     LOG_TEE("slot %d : in cache: %i tokens | to process: %i tokens\n", slot.id, slot.n_past, slot.num_prompt_tokens_processed);
                 }
 
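Here `common_part` returns the length of the prefix that the new prompt shares with the tokens still held in `slot.cache_tokens`, so only the remaining tokens of the prompt need to be evaluated; this is the reuse path that the `cache_prompt: true` field added in chat.sh above opts a request into. A minimal standalone sketch of that prefix-matching idea (the helper name and the token values are illustrative, not the actual server code):

    // Sketch only: mirrors the idea behind common_part(cache_tokens, prompt_tokens).
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using llama_token = int32_t;

    // Number of leading tokens shared by the cached prompt and the new prompt.
    static size_t common_prefix(const std::vector<llama_token> & cached,
                                const std::vector<llama_token> & prompt) {
        size_t i = 0;
        while (i < cached.size() && i < prompt.size() && cached[i] == prompt[i]) {
            i++;
        }
        return i;
    }

    int main() {
        const std::vector<llama_token> cached = {1, 15043, 3186, 13};        // previous turn
        const std::vector<llama_token> prompt = {1, 15043, 3186, 13, 1724};  // previous turn + new text

        const size_t n_past = common_prefix(cached, prompt);
        printf("reuse %zu cached tokens, evaluate %zu new tokens\n",
               n_past, prompt.size() - n_past);  // reuse 4, evaluate 1
        return 0;
    }

With the reused prefix known, `num_prompt_tokens_processed` above is simply the prompt length minus `n_past`.
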
@@ -1573,25 +1554,44 @@ struct llama_server_context
                 // we have to evaluate at least 1 token to generate logits.
                 LOG_TEE("slot %d : we have to evaluate at least 1 token to generate logits\n", slot.id);
                 slot.n_past--;
-                if (slot.ga_i > 0)
-                {
-                    slot.n_past--;
-                }
             }
 
             LOG_VERBOSE("prompt ingested", {
-                                               {"n_past", slot.n_past},
-                                               {"cached", tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
+                                               {"n_past",  slot.n_past},
+                                               {"cached",  tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
                                                {"to_eval", tokens_to_str(ctx, slot.cache_tokens.cbegin() + slot.n_past, slot.cache_tokens.cend())},
                                            });
 
+            if (slot.ga_n != 1)
+            {
+                int ga_i = 0;
+                int32_t ga_n = slot.ga_n;
+                int32_t ga_w = slot.ga_w;
+                int32_t slot_npast = 0;
+                for (int k = 0; k < slot.n_past; ++k)
+                {
+                    while (slot_npast >= ga_i + ga_w) {
+                        const int bd = (ga_w/ga_n)*(ga_n - 1);
+                        slot_npast -= bd;
+                        ga_i += ga_w/ga_n;
+                    }
+                    slot_npast++;
+                }
+                slot.n_past = slot_npast;
+                slot.ga_i = ga_i;
+
+                LOG_TEE("slot %d : applied self-extend to prompt: %i tokens\n", slot.id, slot.n_past);
+            }
+
             const bool has_images = process_images(slot);
 
             // process the prefix of first image
             std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, add_bos_token) : prompt_tokens;
-            int ga_i = slot.ga_i;
+
+            int32_t ga_i = slot.ga_i;
             int32_t ga_n = slot.ga_n;
             int32_t ga_w = slot.ga_w;
+
             for (; slot.n_past < (int) prefix_tokens.size(); ++slot.n_past)
             {
                 if (slot.ga_n != 1)
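
The block added above is the self-extend (group-attention) remapping: each time the running position crosses the current group boundary `ga_i + ga_w`, the completed window of `ga_w` positions is squeezed by a factor of `ga_n`, so the KV position `n_past` grows more slowly than the raw token count. A standalone sketch of the same loop (the function name and the example values are illustrative, not part of server.cpp):

    // Sketch only: the self-extend remapping loop from the diff, as a free function.
    #include <cstdint>
    #include <cstdio>

    // Maps a linear token count to the compressed KV position used when group
    // attention is enabled (factor ga_n, width ga_w); ga_i_out receives the
    // resulting group-attention state.
    static int32_t self_extend_n_past(int32_t n_tokens, int32_t ga_n, int32_t ga_w, int32_t & ga_i_out) {
        int32_t ga_i       = 0;
        int32_t slot_npast = 0;

        for (int32_t k = 0; k < n_tokens; ++k) {
            while (slot_npast >= ga_i + ga_w) {
                const int32_t bd = (ga_w/ga_n)*(ga_n - 1);  // positions reclaimed per completed window
                slot_npast -= bd;
                ga_i       += ga_w/ga_n;
            }
            slot_npast++;
        }

        ga_i_out = ga_i;
        return slot_npast;
    }

    int main() {
        int32_t ga_i = 0;
        // With ga_n = 4 and ga_w = 512, bd = 384 positions are reclaimed each
        // time a window boundary is crossed.
        const int32_t n_past = self_extend_n_past(2048, 4, 512, ga_i);
        printf("2048 tokens -> n_past %d (ga_i = %d)\n", n_past, ga_i);  // prints 896 (ga_i = 384)
        return 0;
    }

The loop below then uses the remapped `slot.n_past` together with `slot.ga_i` when adding the remaining prompt tokens to the batch.
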
@@ -1603,7 +1603,6 @@ struct llama_server_context
                     }
                 }
                 llama_batch_add(batch, prefix_tokens[slot.n_past], system_tokens.size() + slot.n_past, {slot.id }, false);
-                slot.n_past += 1;
             }
 
             if (has_images && !ingest_images(slot, n_batch))

@@ -1660,7 +1659,6 @@ struct llama_server_context
 
                         LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past + bd, slot.n_past, slot.ga_i);
                     }
-                    slot.n_past += n_tokens;
                 }
             }
             llama_batch batch_view =

@@ -1779,51 +1777,51 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     printf("                            not recommended: doubles context memory required and no measurable increase in quality\n");
     if (llama_mlock_supported())
     {
-        printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n");
+        printf("  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
     }
     if (llama_mmap_supported())
     {
-        printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
+        printf("  --no-mmap                 do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
-    printf(" --numa attempt optimizations that help on some NUMA systems\n");
+    printf("  --numa                    attempt optimizations that help on some NUMA systems\n");
 #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
     printf("  -ngl N, --n-gpu-layers N\n");
-    printf(" number of layers to store in VRAM\n");
+    printf("                            number of layers to store in VRAM\n");
     printf("  -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
-    printf(" how to split the model across multiple GPUs, one of:\n");
-    printf(" - none: use one GPU only\n");
-    printf(" - layer (default): split layers and KV across GPUs\n");
-    printf(" - row: split rows across GPUs\n");
+    printf("                            how to split the model across multiple GPUs, one of:\n");
+    printf("                              - none: use one GPU only\n");
+    printf("                              - layer (default): split layers and KV across GPUs\n");
+    printf("                              - row: split rows across GPUs\n");
     printf("  -ts SPLIT --tensor-split SPLIT\n");
-    printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
-    printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n");
-    printf(" or for intermediate results and KV (with split-mode = row)\n");
+    printf("                            fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
+    printf("  -mg i, --main-gpu i       the GPU to use for the model (with split-mode = none),\n");
+    printf("                            or for intermediate results and KV (with split-mode = row)\n");
 #endif
     printf("  -m FNAME, --model FNAME\n");
-    printf(" model path (default: %s)\n", params.model.c_str());
+    printf("                            model path (default: %s)\n", params.model.c_str());
     printf("  -a ALIAS, --alias ALIAS\n");
-    printf(" set an alias for the model, will be added as `model` field in completion response\n");
-    printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
-    printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
-    printf(" --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
-    printf(" --port PORT port to listen (default (default: %d)\n", sparams.port);
-    printf(" --path PUBLIC_PATH path from which to serve static files (default %s)\n", sparams.public_path.c_str());
-    printf(" --api-key API_KEY optional api key to enhance server security. If set, requests must include this key for access.\n");
-    printf(" --api-key-file FNAME path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
-    printf(" -to N, --timeout N server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
-    printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
-    printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
-    printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
-    printf(" -spf FNAME, --system-prompt-file FNAME\n");
-    printf(" Set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
-    printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA.\n");
-    printf(" --log-disable disables logging to a file.\n");
+    printf("                            set an alias for the model, will be added as `model` field in completion response\n");
+    printf("  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
+    printf("  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
+    printf("  --host                    ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
+    printf("  --port PORT               port to listen (default (default: %d)\n", sparams.port);
+    printf("  --path PUBLIC_PATH        path from which to serve static files (default %s)\n", sparams.public_path.c_str());
+    printf("  --api-key API_KEY         optional api key to enhance server security. If set, requests must include this key for access.\n");
+    printf("  --api-key-file FNAME      path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
+    printf("  -to N, --timeout N        server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
+    printf("  --embedding               enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
+    printf("  -np N, --parallel N       number of slots for process requests (default: %d)\n", params.n_parallel);
+    printf("  -cb, --cont-batching      enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
+    printf("  -spf FNAME, --system-prompt-file FNAME\n");
+    printf("                            set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
+    printf("  --mmproj MMPROJ_FILE      path to a multimodal projector file for LLaVA.\n");
+    printf("  --log-disable             disables logging to a file.\n");
     printf("\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
-    printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
-    printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
-    printf(" -gan N, --grp-attn-n N Set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`");
-    printf(" -gaw N, --grp-attn-w N Set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`");
+    printf("                            advanced option to override model metadata by key. may be specified multiple times.\n");
+    printf("                            types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
+    printf("  -gan N, --grp-attn-n N    set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`");
+    printf("  -gaw N, --grp-attn-w N    set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`");
     printf("\n");
 }
