From 48d5a1f8d067cc7fc59163d140f1b204f66509f6 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Wed, 23 Oct 2024 17:15:57 +0300
Subject: [PATCH] server : don't overfill the batch during infill

ggml-ci
---
 examples/server/server.cpp | 1 +
 examples/server/utils.hpp  | 6 ++++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index ff1d9b03cec5d..077c7ad1adffb 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1880,6 +1880,7 @@ struct server_context {
         if (slot.state == SLOT_STATE_STARTED) {
             slot.t_start_process_prompt = ggml_time_us();
             slot.t_start_generation = 0;
+            slot.n_past = 0;
 
             slot.n_prompt_tokens = prompt_tokens.size();
             slot.state = SLOT_STATE_PROCESSING_PROMPT;
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index 8112420624185..562635555e0ab 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -266,8 +266,10 @@ static llama_tokens format_infill(
     }
 
     // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
-    const int n_suffix_take = std::min<int>(tokens_suffix.size(),   (n_batch/4));
-    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4) - 3);
+    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
+    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));
+
+    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));
 
     // fill the rest of the context with extra chunks
     const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());
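
Editorial note (not part of the patch): below is a minimal standalone C++ sketch of the new FIM token-budget math that this patch puts in format_infill(). All sizes (n_batch, prefix/suffix/prompt token counts) are made-up example values, not taken from the patch. It illustrates the intent of the change: the prefix is capped at 3/4 of the batch, and the suffix only receives what is left of the remaining quarter after the prompt tokens and two reserved tokens, so the combined take cannot overfill the batch.

    #include <algorithm>
    #include <cstdio>

    int main() {
        const int n_batch         = 2048;  // example batch size (assumed value)
        const int n_prefix_tokens = 5000;  // hypothetical tokenized prefix length
        const int n_suffix_tokens = 4000;  // hypothetical tokenized suffix length
        const int n_prompt_tokens = 16;    // hypothetical tokenized prompt length

        // new budget: prefix capped at 3/4 of the batch; suffix gets whatever is
        // left of the remaining quarter after the prompt and 2 reserved tokens
        // (the constant 2 mirrors the patch; what it reserves is not spelled out there)
        const int n_prefix_take = std::min(n_prefix_tokens, 3*(n_batch/4));
        const int n_suffix_take = std::min(n_suffix_tokens, std::max(0, (n_batch/4) - (2 + n_prompt_tokens)));

        // with the example values: prefix = 1536, suffix = 494, total = 2048 <= n_batch
        printf("prefix = %d, suffix = %d, total = %d (n_batch = %d)\n",
               n_prefix_take, n_suffix_take,
               n_prefix_take + n_suffix_take + n_prompt_tokens + 2, n_batch);
        return 0;
    }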