From 5663e168212ae5044ad18aa285033d8a2e7bf046 Mon Sep 17 00:00:00 2001
From: Eero Tamminen
Date: Fri, 6 Dec 2024 05:08:57 +0200
Subject: [PATCH] Exclude yield/reply time from first token latency metric
 (#973)

While the metrics are fine for a small number of requests, when the
megaservice was handling many (hundreds of) _parallel_ requests, it
reported a clearly (~10%) larger first token latency than the client
receiving the tokens from the megaservice was seeing.

Taking the timestamp before the token is yielded means that the
reported first token latency can be slightly shorter than it actually
is. However, testing with ChatQnA shows the latencies to be much
closer to the ones seen by the client (within a couple of percent),
and typically smaller (i.e. logical).

PS. Doing the metrics timing after yielding the token meant that the
time for sending the reply to the client, and waiting for that to
complete, was also included in the token time. I suspect that with a
lot of parallel requests, processing often switched to other
megaservice request handlers, and getting control back to the
yielding code for timing could be delayed for much longer than
sending the response to the client took.

Signed-off-by: Eero Tamminen
---
 comps/cores/mega/orchestrator.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comps/cores/mega/orchestrator.py b/comps/cores/mega/orchestrator.py
index 8a75f9cff..803965f6e 100644
--- a/comps/cores/mega/orchestrator.py
+++ b/comps/cores/mega/orchestrator.py
@@ -237,8 +237,8 @@ def generate():
                         )
                         token_start = time.time()
                     else:
-                        yield chunk
                         token_start = self.metrics.token_update(token_start, is_first)
+                        yield chunk
                     is_first = False
         self.metrics.request_update(req_start)
         self.metrics.pending_update(False)
@@ -306,7 +306,7 @@ def token_generator(self, sentence: str, token_start: float, is_first: bool, is_
             suffix = "\n\n"
         tokens = re.findall(r"\s?\S+\s?", sentence, re.UNICODE)
         for token in tokens:
-            yield prefix + repr(token.replace("\\n", "\n").encode("utf-8")) + suffix
             token_start = self.metrics.token_update(token_start, is_first)
+            yield prefix + repr(token.replace("\\n", "\n").encode("utf-8")) + suffix
         if is_last:
             yield "data: [DONE]\n\n"
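
---

Illustrative note (not part of the patch): below is a minimal, self-contained
Python sketch of the timing pattern this change adopts. The Metrics class and
generate() function here are hypothetical stand-ins for the orchestrator's
self.metrics object and its generate() closure; only token_update()'s
record-elapsed-time-and-return-fresh-timestamp behavior is mirrored from the
patched code, the rest is an assumption made to keep the sketch runnable.

    import time

    class Metrics:
        # Toy stand-in for the orchestrator's metrics object (assumption:
        # only token_update() is modeled; the real object also has
        # request_update(), pending_update(), etc.).
        def __init__(self):
            self.first_token_latency = None
            self.inter_token_latencies = []

        def token_update(self, token_start, is_first):
            # Record the time elapsed since token_start and return a fresh
            # timestamp for timing the next token.
            now = time.time()
            if is_first:
                self.first_token_latency = now - token_start
            else:
                self.inter_token_latencies.append(now - token_start)
            return now

    def generate(chunks, metrics):
        # Hypothetical streaming generator mirroring the patched pattern.
        token_start = time.time()
        is_first = True
        for chunk in chunks:
            # Take the timestamp BEFORE yielding: after the yield, control
            # passes to the HTTP layer (and, under load, to other request
            # handlers), so a timestamp taken after the yield would also
            # include the time spent sending the reply to the client.
            token_start = metrics.token_update(token_start, is_first)
            yield chunk
            is_first = False

    # Usage: the consumer plays the role of the server writing to the client.
    m = Metrics()
    for chunk in generate(["Hello", " ", "world"], m):
        pass  # a real server would write the chunk to the client here
    print(m.first_token_latency, m.inter_token_latencies)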