From fbd018c4c34f32a428e1ab81b441da67367f7acd Mon Sep 17 00:00:00 2001 From: Michal Kulakowski Date: Wed, 29 Apr 2026 14:55:57 +0200 Subject: [PATCH] Support finish reason in legacy pipelines --- src/llm/apis/openai_completions.cpp | 13 ++++++++----- src/llm/apis/openai_responses.cpp | 18 ++++++++++++++++-- src/llm/language_model/legacy/servable.cpp | 6 +++++- .../visual_language_model/legacy/servable.cpp | 6 +++++- src/test/llm/llmnode_test.cpp | 2 +- 5 files changed, 35 insertions(+), 10 deletions(-) diff --git a/src/llm/apis/openai_completions.cpp b/src/llm/apis/openai_completions.cpp index 810fcdc50a..8a9590d11f 100644 --- a/src/llm/apis/openai_completions.cpp +++ b/src/llm/apis/openai_completions.cpp @@ -409,17 +409,18 @@ std::string OpenAIChatCompletionsHandler::serializeUnaryResponse(ov::genai::Enco // choices: array of size N, where N is related to n request parameter jsonResponse.StartArray("choices"); - int index = 0; - for (int i = 0; i < results.tokens.size(); i++) { + for (size_t i = 0; i < results.tokens.size(); ++i) { const std::vector<int64_t>& tokens = results.tokens[i]; SPDLOG_LOGGER_TRACE(llm_calculator_logger, "Generated tokens: {}", tokens); ParsedOutput parsedOutput = parseOutputIfNeeded(tokens); jsonResponse.StartObject(); // finish_reason: "stop" in regular scenario, "tool_calls" if output contains tool calls - auto finishReason = mapFinishReason(ov::genai::GenerationFinishReason::STOP, !parsedOutput.toolCalls.empty()); + const ov::genai::GenerationFinishReason finishReasonRaw = + (!results.finish_reasons.empty()) ? 
results.finish_reasons[0] : ov::genai::GenerationFinishReason::STOP; + auto finishReason = mapFinishReason(finishReasonRaw, !parsedOutput.toolCalls.empty()); jsonResponse.FinishReason(finishReason.value_or("unknown")); // index: integer; Choice index, only n=1 supported anyway - jsonResponse.Index(index++); + jsonResponse.Index(static_cast<int>(i)); if (endpoint == Endpoint::CHAT_COMPLETIONS) { jsonResponse.MessageObject(parsedOutput); @@ -481,7 +482,9 @@ std::string OpenAIChatCompletionsHandler::serializeUnaryResponse(ov::genai::VLMD ParsedOutput parsedOutput = parseOutputIfNeeded(generatedTokens); jsonResponse.StartObject(); // finish_reason: "stop" in regular scenario, "tool_calls" if output contains tool calls - auto finishReason = mapFinishReason(ov::genai::GenerationFinishReason::STOP, !parsedOutput.toolCalls.empty()); + const ov::genai::GenerationFinishReason finishReasonRaw = + (!results.finish_reasons.empty()) ? results.finish_reasons[0] : ov::genai::GenerationFinishReason::STOP; + auto finishReason = mapFinishReason(finishReasonRaw, !parsedOutput.toolCalls.empty()); jsonResponse.FinishReason(finishReason.value_or("unknown")); // index: integer; Choice index, only n=1 supported anyway jsonResponse.Index(index++); diff --git a/src/llm/apis/openai_responses.cpp b/src/llm/apis/openai_responses.cpp index 49703c0fc2..dcd886aed3 100644 --- a/src/llm/apis/openai_responses.cpp +++ b/src/llm/apis/openai_responses.cpp @@ -649,10 +649,17 @@ std::string OpenAIResponsesHandler::serializeUnaryResponse(ov::genai::EncodedRes usage.promptTokens = results.perf_metrics.get_num_input_tokens(); usage.completionTokens = results.perf_metrics.get_num_generated_tokens(); std::vector<ParsedOutput> parsedOutputs; + ov::genai::GenerationFinishReason responsesFinishReason = ov::genai::GenerationFinishReason::STOP; for (const auto& tokens : results.tokens) { parsedOutputs.push_back(parseOutputIfNeeded(tokens)); } - return serializeUnaryResponseImpl(parsedOutputs); + for (const auto& finishReason : 
results.finish_reasons) { + if (finishReason == ov::genai::GenerationFinishReason::LENGTH) { + responsesFinishReason = ov::genai::GenerationFinishReason::LENGTH; + break; + } + } + return serializeUnaryResponseImpl(parsedOutputs, responsesFinishReason); } std::string OpenAIResponsesHandler::serializeUnaryResponse(ov::genai::VLMDecodedResults& results, const std::string& textResponse) { @@ -673,7 +680,14 @@ std::string OpenAIResponsesHandler::serializeUnaryResponse(ov::genai::VLMDecoded parsedOutputs.push_back(std::move(output)); } } - return serializeUnaryResponseImpl(parsedOutputs); + ov::genai::GenerationFinishReason responsesFinishReason = ov::genai::GenerationFinishReason::STOP; + for (const auto& finishReason : results.finish_reasons) { + if (finishReason == ov::genai::GenerationFinishReason::LENGTH) { + responsesFinishReason = ov::genai::GenerationFinishReason::LENGTH; + break; + } + } + return serializeUnaryResponseImpl(parsedOutputs, responsesFinishReason); } // --- Streaming event building blocks --- diff --git a/src/llm/language_model/legacy/servable.cpp b/src/llm/language_model/legacy/servable.cpp index 4234088a2a..3ae0055530 100644 --- a/src/llm/language_model/legacy/servable.cpp +++ b/src/llm/language_model/legacy/servable.cpp @@ -229,7 +229,11 @@ absl::Status LegacyServable::preparePartialResponse(std::shared_ptrlastStreamerCallbackOutput.empty()) { lastTextChunk = lastTextChunk + executionContext->lastStreamerCallbackOutput; } - std::string serializedChunk = executionContext->apiHandler->serializeStreamingChunk(lastTextChunk, ov::genai::GenerationFinishReason::STOP); + ov::genai::GenerationFinishReason finishReason = ov::genai::GenerationFinishReason::STOP; + if (!legacyExecutionContext->results.finish_reasons.empty()) { + finishReason = legacyExecutionContext->results.finish_reasons[0]; + } + std::string serializedChunk = executionContext->apiHandler->serializeStreamingChunk(lastTextChunk, finishReason); if (!serializedChunk.empty()) { 
executionContext->response = wrapTextInServerSideEventMessage(serializedChunk); } diff --git a/src/llm/visual_language_model/legacy/servable.cpp b/src/llm/visual_language_model/legacy/servable.cpp index 6297745360..d56e851572 100644 --- a/src/llm/visual_language_model/legacy/servable.cpp +++ b/src/llm/visual_language_model/legacy/servable.cpp @@ -245,7 +245,11 @@ absl::Status VisualLanguageModelLegacyServable::preparePartialResponse(std::shar if (!executionContext->lastStreamerCallbackOutput.empty()) { lastTextChunk = lastTextChunk + executionContext->lastStreamerCallbackOutput; } - std::string serializedChunk = executionContext->apiHandler->serializeStreamingChunk(lastTextChunk, ov::genai::GenerationFinishReason::STOP); + ov::genai::GenerationFinishReason finishReason = ov::genai::GenerationFinishReason::STOP; + if (!legacyExecutionContext->results.finish_reasons.empty()) { + finishReason = legacyExecutionContext->results.finish_reasons[0]; + } + std::string serializedChunk = executionContext->apiHandler->serializeStreamingChunk(lastTextChunk, finishReason); if (!serializedChunk.empty()) { executionContext->response = wrapTextInServerSideEventMessage(serializedChunk); } diff --git a/src/test/llm/llmnode_test.cpp b/src/test/llm/llmnode_test.cpp index ecd1d598d7..33c16c1a3e 100644 --- a/src/test/llm/llmnode_test.cpp +++ b/src/test/llm/llmnode_test.cpp @@ -2688,7 +2688,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values( // params: model name, generate expected output, check logprobs, check finish reason, test speculative decoding, supports empty handshake msg TestParameters{"lm_cb_regular", true, true, true, false, true}, - TestParameters{"lm_legacy_regular", false, false, false, false, false}, + TestParameters{"lm_legacy_regular", false, false, true, false, false}, TestParameters{"vlm_cb_regular", false, true, true, false, true}, TestParameters{"vlm_legacy_regular", false, false, false, false, false}));