diff --git a/packages/opencode/test/session/llm-native.test.ts b/packages/opencode/test/session/llm-native.test.ts
index 81d4a131190d..40aa71df4d39 100644
--- a/packages/opencode/test/session/llm-native.test.ts
+++ b/packages/opencode/test/session/llm-native.test.ts
@@ -1,5 +1,7 @@
 import { describe, expect, test } from "bun:test"
+import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
 import { jsonSchema, tool, type ModelMessage } from "ai"
+import { Effect } from "effect"
 import { LLMNative } from "@/session/llm-native"
 import type { Provider } from "@/provider/provider"
 import { ModelID, ProviderID } from "@/provider/schema"
@@ -231,4 +233,30 @@ describe("session.llm-native.request", () => {
       }),
     ).toThrow("Native LLM request adapter does not support provider package unknown-provider")
   })
+
+  test("compiles through the native OpenAI Responses route", async () => {
+    const prepared = await Effect.runPromise(
+      LLMClient.prepare(
+        LLMNative.request({
+          model: baseModel,
+          messages: [{ role: "user", content: "hello" }],
+          providerOptions: { openai: { store: false } },
+          maxOutputTokens: 512,
+          headers: { "x-request": "request-header" },
+        }),
+      ).pipe(Effect.provide(LLMClient.layer), Effect.provide(RequestExecutor.defaultLayer)),
+    )
+
+    expect(prepared).toMatchObject({
+      route: "openai-responses",
+      protocol: "openai-responses",
+      body: {
+        model: "gpt-5-mini",
+        input: [{ role: "user", content: [{ type: "input_text", text: "hello" }] }],
+        max_output_tokens: 512,
+        store: false,
+        stream: true,
+      },
+    })
+  })
 })