From d79e8ba701e2dad7dc0aeeec10bbf76fe987313b Mon Sep 17 00:00:00 2001
From: Kit Langton
Date: Mon, 11 May 2026 17:30:03 -0400
Subject: [PATCH 1/3] refactor(session): add native LLM request adapter

---
 packages/opencode/src/session/llm-native.ts   | 184 +++++++++++++++
 .../opencode/test/session/llm-native.test.ts  | 219 ++++++++++++++++++
 2 files changed, 403 insertions(+)
 create mode 100644 packages/opencode/src/session/llm-native.ts
 create mode 100644 packages/opencode/test/session/llm-native.test.ts

diff --git a/packages/opencode/src/session/llm-native.ts b/packages/opencode/src/session/llm-native.ts
new file mode 100644
index 000000000000..6bb2159412c8
--- /dev/null
+++ b/packages/opencode/src/session/llm-native.ts
@@ -0,0 +1,184 @@
+import type { JsonSchema, LLMRequest, ProviderMetadata, ToolDefinition } from "@opencode-ai/llm"
+import { LLM } from "@opencode-ai/llm"
+import type { ModelMessage } from "ai"
+import type { Provider } from "@/provider/provider"
+
+type ToolInput = {
+  readonly description?: string
+  readonly inputSchema?: unknown
+}
+
+export type RequestInput = {
+  readonly model: Provider.Model
+  readonly system?: readonly string[]
+  readonly messages: readonly ModelMessage[]
+  readonly tools?: Record<string, ToolInput>
+  readonly toolChoice?: "auto" | "required" | "none"
+  readonly temperature?: number
+  readonly topP?: number
+  readonly topK?: number
+  readonly maxOutputTokens?: number
+  readonly providerOptions?: LLMRequest["providerOptions"]
+  readonly headers?: Record<string, string>
+}
+
+const DEFAULT_BASE_URL: Record<string, string> = {
+  "@ai-sdk/openai": "https://api.openai.com/v1",
+  "@ai-sdk/anthropic": "https://api.anthropic.com/v1",
+  "@ai-sdk/google": "https://generativelanguage.googleapis.com/v1beta",
+  "@ai-sdk/amazon-bedrock": "https://bedrock-runtime.us-east-1.amazonaws.com",
+}
+
+const ROUTE: Record<string, string> = {
+  "@ai-sdk/openai": "openai-responses",
+  "@ai-sdk/azure": "azure-openai-responses",
+  "@ai-sdk/anthropic": "anthropic-messages",
+  "@ai-sdk/google": "gemini",
+  "@ai-sdk/amazon-bedrock": "bedrock-converse",
+  "@ai-sdk/openai-compatible": "openai-compatible-chat",
+  "@openrouter/ai-sdk-provider": "openai-compatible-chat",
+}
+
+const isRecord = (value: unknown): value is Record<string, unknown> =>
+  typeof value === "object" && value !== null && !Array.isArray(value)
+
+const providerMetadata = (value: unknown): ProviderMetadata | undefined => {
+  if (!isRecord(value)) return undefined
+  const result = Object.fromEntries(
+    Object.entries(value).filter((entry): entry is [string, Record<string, unknown>] => isRecord(entry[1])),
+  )
+  return Object.keys(result).length === 0 ? undefined : result
+}
+
+const textPart = (part: Record<string, unknown>) => ({
+  type: "text" as const,
+  text: typeof part.text === "string" ? part.text : "",
+  providerMetadata: providerMetadata(part.providerOptions),
+})
+
+const mediaPart = (part: Record<string, unknown>) => {
+  if (typeof part.data !== "string" && !(part.data instanceof Uint8Array))
+    throw new Error("Native LLM request adapter only supports file parts with string or Uint8Array data")
+  return {
+    type: "media" as const,
+    mediaType: typeof part.mediaType === "string" ? part.mediaType : "application/octet-stream",
+    data: part.data,
+    filename: typeof part.filename === "string" ? part.filename : undefined,
+  }
+}
+
+const toolResult = (part: Record<string, unknown>) => {
+  const output = isRecord(part.output) ? part.output : { type: "json", value: part.output }
+  const type = output.type === "text" ? "text" : output.type === "error-text" ? "error" : "json"
+  return LLM.toolResult({
+    id: typeof part.toolCallId === "string" ? part.toolCallId : "",
+    name: typeof part.toolName === "string" ? part.toolName : "",
+    result: "value" in output ? output.value : output,
+    resultType: type,
+    providerExecuted: typeof part.providerExecuted === "boolean" ? part.providerExecuted : undefined,
+    providerMetadata: providerMetadata(part.providerOptions),
+  })
+}
+
+const contentPart = (part: unknown) => {
+  if (!isRecord(part)) throw new Error("Native LLM request adapter only supports object content parts")
+  if (part.type === "text") return textPart(part)
+  if (part.type === "file") return mediaPart(part)
+  if (part.type === "reasoning")
+    return {
+      type: "reasoning" as const,
+      text: typeof part.text === "string" ? part.text : "",
+      providerMetadata: providerMetadata(part.providerOptions),
+    }
+  if (part.type === "tool-call")
+    return LLM.toolCall({
+      id: typeof part.toolCallId === "string" ? part.toolCallId : "",
+      name: typeof part.toolName === "string" ? part.toolName : "",
+      input: part.input,
+      providerExecuted: typeof part.providerExecuted === "boolean" ? part.providerExecuted : undefined,
+      providerMetadata: providerMetadata(part.providerOptions),
+    })
+  if (part.type === "tool-result") return toolResult(part)
+  throw new Error(`Native LLM request adapter does not support ${String(part.type)} content parts`)
+}
+
+const content = (value: ModelMessage["content"]) =>
+  typeof value === "string" ? [LLM.text(value)] : value.map(contentPart)
+
+const messages = (input: readonly ModelMessage[]) => {
+  const system = input.flatMap((message) => (message.role === "system" ? [LLM.system(message.content)] : []))
+  const messages = input.flatMap((message) => {
+    if (message.role === "system") return []
+    return [
+      LLM.message({
+        role: message.role,
+        content: content(message.content),
+        native: isRecord(message.providerOptions) ? { providerOptions: message.providerOptions } : undefined,
+      }),
+    ]
+  })
+  return { system, messages }
+}
+
+const schema = (value: unknown): JsonSchema => {
+  if (!isRecord(value)) return { type: "object", properties: {} }
+  if (isRecord(value.jsonSchema)) return value.jsonSchema
+  return value
+}
+
+const tools = (input: Record<string, ToolInput> | undefined): ToolDefinition[] =>
+  Object.entries(input ?? {}).map(([name, item]) =>
+    LLM.toolDefinition({
+      name,
+      description: item.description ?? "",
+      inputSchema: schema(item.inputSchema),
+    }),
+  )
+
+const generation = (input: RequestInput) => {
+  const result = {
+    temperature: input.temperature,
+    topP: input.topP,
+    topK: input.topK,
+    maxTokens: input.maxOutputTokens,
+  }
+  return Object.values(result).some((value) => value !== undefined) ? result : undefined
+}
+
+const baseURL = (model: Provider.Model) => {
+  if (model.api.url) return model.api.url
+  const fallback = DEFAULT_BASE_URL[model.api.npm]
+  if (fallback) return fallback
+  throw new Error(`Native LLM request adapter requires a base URL for ${model.providerID}/${model.id}`)
+}
+
+export const model = (model: Provider.Model, headers?: Record<string, string>) => {
+  const route = ROUTE[model.api.npm]
+  if (!route) throw new Error(`Native LLM request adapter does not support provider package ${model.api.npm}`)
+  return LLM.model({
+    id: model.api.id,
+    provider: model.providerID,
+    route,
+    baseURL: baseURL(model),
+    headers: Object.keys({ ...model.headers, ...headers }).length === 0 ? undefined : { ...model.headers, ...headers },
+    limits: {
+      context: model.limit.context,
+      output: model.limit.output,
+    },
+  })
+}
+
+export const request = (input: RequestInput) => {
+  const converted = messages(input.messages)
+  return LLM.request({
+    model: model(input.model, input.headers),
+    system: [...(input.system ?? []).map(LLM.system), ...converted.system],
+    messages: converted.messages,
+    tools: tools(input.tools),
+    toolChoice: input.toolChoice,
+    generation: generation(input),
+    providerOptions: input.providerOptions,
+  })
+}
+
+export * as LLMNative from "./llm-native"
diff --git a/packages/opencode/test/session/llm-native.test.ts b/packages/opencode/test/session/llm-native.test.ts
new file mode 100644
index 000000000000..9a8e003d1f41
--- /dev/null
+++ b/packages/opencode/test/session/llm-native.test.ts
@@ -0,0 +1,219 @@
+import { describe, expect, test } from "bun:test"
+import { jsonSchema, tool, type ModelMessage } from "ai"
+import { LLMNative } from "@/session/llm-native"
+import type { Provider } from "@/provider/provider"
+import { ModelID, ProviderID } from "@/provider/schema"
+
+const baseModel: Provider.Model = {
+  id: ModelID.make("gpt-5-mini"),
+  providerID: ProviderID.make("openai"),
+  api: {
+    id: "gpt-5-mini",
+    url: "https://api.openai.com/v1",
+    npm: "@ai-sdk/openai",
+  },
+  name: "GPT-5 Mini",
+  capabilities: {
+    temperature: true,
+    reasoning: true,
+    attachment: true,
+    toolcall: true,
+    input: {
+      text: true,
+      audio: false,
+      image: true,
+      video: false,
+      pdf: false,
+    },
+    output: {
+      text: true,
+      audio: false,
+      image: false,
+      video: false,
+      pdf: false,
+    },
+    interleaved: false,
+  },
+  cost: {
+    input: 0,
+    output: 0,
+    cache: {
+      read: 0,
+      write: 0,
+    },
+  },
+  limit: {
+    context: 128_000,
+    input: 128_000,
+    output: 32_000,
+  },
+  status: "active",
+  options: {},
+  headers: {
+    "x-model": "model-header",
+  },
+  release_date: "2026-01-01",
+}
+
+describe("session.llm-native.request", () => {
+  test("maps normalized stream inputs to a native LLM request", () => {
+    const messages: ModelMessage[] = [
+      {
+        role: "system",
+        content: "system from messages",
+      },
+      {
+        role: "user",
+        content: [
+          { type: "text", text: "hello", providerOptions: { openai: { cacheControl: { type: "ephemeral" } } } },
+          { type: "file", mediaType: "image/png", filename: "img.png", data: "data:image/png;base64,Zm9v" },
+        ],
+      },
+      {
+        role: "assistant",
+        content: [
+          { type: "reasoning", text: "thinking", providerOptions: { openai: { encryptedContent: "secret" } } },
+          { type: "text", text: "I'll run it" },
+          {
+            type: "tool-call",
+            toolCallId: "call-1",
+            toolName: "bash",
+            input: { command: "ls" },
+            providerOptions: { openai: { itemId: "item-1" } },
+          },
+        ],
+      },
+      {
+        role: "tool",
+        content: [
+          {
+            type: "tool-result",
+            toolCallId: "call-1",
+            toolName: "bash",
+            output: { type: "text", value: "ok" },
+            providerOptions: { openai: { outputId: "output-1" } },
+          },
+        ],
+      },
+    ]
+
+    const request = LLMNative.request({
+      model: baseModel,
+      system: ["agent system"],
+      messages,
+      tools: {
+        bash: tool({
+          description: "Run a shell command",
+          inputSchema: jsonSchema({
+            type: "object",
+            properties: {
+              command: { type: "string" },
+            },
+            required: ["command"],
+          }),
+        }),
+      },
+      toolChoice: "required",
+      temperature: 0.2,
+      topP: 0.9,
+      topK: 40,
+      maxOutputTokens: 1024,
+      providerOptions: { openai: { store: false } },
+      headers: { "x-request": "request-header" },
+    })
+
+    expect(request.model).toMatchObject({
+      id: "gpt-5-mini",
+      provider: "openai",
+      route: "openai-responses",
+      baseURL: "https://api.openai.com/v1",
+      headers: {
+        "x-model": "model-header",
+        "x-request": "request-header",
+      },
+      limits: {
+        context: 128_000,
+        output: 32_000,
+      },
+    })
+    expect(request.system).toEqual([
+      { type: "text", text: "agent system" },
+      { type: "text", text: "system from messages" },
+    ])
+    expect(request.generation).toMatchObject({
+      temperature: 0.2,
+      topP: 0.9,
+      topK: 40,
+      maxTokens: 1024,
+    })
+    expect(request.providerOptions).toEqual({ openai: { store: false } })
+    expect(request.toolChoice).toMatchObject({ type: "required" })
+    expect(request.tools).toMatchObject([
+      {
+        name: "bash",
+        description: "Run a shell command",
+        inputSchema: {
+          type: "object",
+          properties: {
+            command: { type: "string" },
+          },
+          required: ["command"],
+        },
+      },
+    ])
+    expect(request.messages).toMatchObject([
+      {
+        role: "user",
+        content: [
+          { type: "text", text: "hello", providerMetadata: { openai: { cacheControl: { type: "ephemeral" } } } },
+          { type: "media", mediaType: "image/png", filename: "img.png", data: "data:image/png;base64,Zm9v" },
+        ],
+      },
+      {
+        role: "assistant",
+        content: [
+          { type: "reasoning", text: "thinking", providerMetadata: { openai: { encryptedContent: "secret" } } },
+          { type: "text", text: "I'll run it" },
+          {
+            type: "tool-call",
+            id: "call-1",
+            name: "bash",
+            input: { command: "ls" },
+            providerMetadata: { openai: { itemId: "item-1" } },
+          },
+        ],
+      },
+      {
+        role: "tool",
+        content: [
+          {
+            type: "tool-result",
+            id: "call-1",
+            name: "bash",
+            result: { type: "text", value: "ok" },
+            providerMetadata: { openai: { outputId: "output-1" } },
+          },
+        ],
+      },
+    ])
+  })
+
+  test("selects native routes from existing provider packages", () => {
+    expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/anthropic" } }).route).toBe(
+      "anthropic-messages",
+    )
+    expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/google" } }).route).toBe("gemini")
+    expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/openai-compatible" } }).route).toBe(
+      "openai-compatible-chat",
+    )
+  })
+
+  test("fails fast for unsupported provider packages", () => {
+    expect(() =>
+      LLMNative.request({
+        model: { ...baseModel, api: { ...baseModel.api, npm: "unknown-provider" } },
+        messages: [],
+      }),
+    ).toThrow("Native LLM request adapter does not support provider package unknown-provider")
+  })
+})
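
A gap in the suite above worth a follow-up: DEFAULT_BASE_URL carries no entry
for "@ai-sdk/openai-compatible", so a model on that package with an empty
api.url makes baseURL() throw instead of guessing an endpoint. A minimal
sketch of such a test, assuming it sits beside the existing cases and reuses
the baseModel fixture (it is not part of this patch):

  // Sketch only: baseModel is the fixture from llm-native.test.ts; the message
  // is the throw in baseURL() for packages missing from DEFAULT_BASE_URL.
  test("fails fast when no base URL is known", () => {
    expect(() =>
      LLMNative.model({ ...baseModel, api: { ...baseModel.api, url: "", npm: "@ai-sdk/openai-compatible" } }),
    ).toThrow("Native LLM request adapter requires a base URL for openai/gpt-5-mini")
  })
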
"openai-responses", + baseURL: "https://api.openai.com/v1", + headers: { + "x-model": "model-header", + "x-request": "request-header", + }, + limits: { + context: 128_000, + output: 32_000, + }, + }) + expect(request.system).toEqual([ + { type: "text", text: "agent system" }, + { type: "text", text: "system from messages" }, + ]) + expect(request.generation).toMatchObject({ + temperature: 0.2, + topP: 0.9, + topK: 40, + maxTokens: 1024, + }) + expect(request.providerOptions).toEqual({ openai: { store: false } }) + expect(request.toolChoice).toMatchObject({ type: "required" }) + expect(request.tools).toMatchObject([ + { + name: "bash", + description: "Run a shell command", + inputSchema: { + type: "object", + properties: { + command: { type: "string" }, + }, + required: ["command"], + }, + }, + ]) + expect(request.messages).toMatchObject([ + { + role: "user", + content: [ + { type: "text", text: "hello", providerMetadata: { openai: { cacheControl: { type: "ephemeral" } } } }, + { type: "media", mediaType: "image/png", filename: "img.png", data: "data:image/png;base64,Zm9v" }, + ], + }, + { + role: "assistant", + content: [ + { type: "reasoning", text: "thinking", providerMetadata: { openai: { encryptedContent: "secret" } } }, + { type: "text", text: "I'll run it" }, + { + type: "tool-call", + id: "call-1", + name: "bash", + input: { command: "ls" }, + providerMetadata: { openai: { itemId: "item-1" } }, + }, + ], + }, + { + role: "tool", + content: [ + { + type: "tool-result", + id: "call-1", + name: "bash", + result: { type: "text", value: "ok" }, + providerMetadata: { openai: { outputId: "output-1" } }, + }, + ], + }, + ]) + }) + + test("selects native routes from existing provider packages", () => { + expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/anthropic" } }).route).toBe( + "anthropic-messages", + ) + expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/google" } }).route).toBe("gemini") + expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/openai-compatible" } }).route).toBe( + "openai-compatible-chat", + ) + }) + + test("fails fast for unsupported provider packages", () => { + expect(() => + LLMNative.request({ + model: { ...baseModel, api: { ...baseModel.api, npm: "unknown-provider" } }, + messages: [], + }), + ).toThrow("Native LLM request adapter does not support provider package unknown-provider") + }) +}) From 3e8fc071eda5c01a2fc0c7322b1c50ebd6c51edc Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Mon, 11 May 2026 17:32:17 -0400 Subject: [PATCH 2/3] fix(session): target native OpenRouter route --- packages/opencode/src/session/llm-native.ts | 3 +- .../opencode/test/session/llm-native.test.ts | 29 ++++++++++++++----- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/packages/opencode/src/session/llm-native.ts b/packages/opencode/src/session/llm-native.ts index 6bb2159412c8..1c359dcf9d50 100644 --- a/packages/opencode/src/session/llm-native.ts +++ b/packages/opencode/src/session/llm-native.ts @@ -27,6 +27,7 @@ const DEFAULT_BASE_URL: Record = { "@ai-sdk/anthropic": "https://api.anthropic.com/v1", "@ai-sdk/google": "https://generativelanguage.googleapis.com/v1beta", "@ai-sdk/amazon-bedrock": "https://bedrock-runtime.us-east-1.amazonaws.com", + "@openrouter/ai-sdk-provider": "https://openrouter.ai/api/v1", } const ROUTE: Record = { @@ -36,7 +37,7 @@ const ROUTE: Record = { "@ai-sdk/google": "gemini", "@ai-sdk/amazon-bedrock": "bedrock-converse", "@ai-sdk/openai-compatible": 
"openai-compatible-chat", - "@openrouter/ai-sdk-provider": "openai-compatible-chat", + "@openrouter/ai-sdk-provider": "openrouter", } const isRecord = (value: unknown): value is Record => diff --git a/packages/opencode/test/session/llm-native.test.ts b/packages/opencode/test/session/llm-native.test.ts index 9a8e003d1f41..81d4a131190d 100644 --- a/packages/opencode/test/session/llm-native.test.ts +++ b/packages/opencode/test/session/llm-native.test.ts @@ -199,13 +199,28 @@ describe("session.llm-native.request", () => { }) test("selects native routes from existing provider packages", () => { - expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/anthropic" } }).route).toBe( - "anthropic-messages", - ) - expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/google" } }).route).toBe("gemini") - expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/openai-compatible" } }).route).toBe( - "openai-compatible-chat", - ) + expect( + LLMNative.model({ ...baseModel, api: { ...baseModel.api, url: "", npm: "@ai-sdk/anthropic" } }), + ).toMatchObject({ + route: "anthropic-messages", + baseURL: "https://api.anthropic.com/v1", + }) + expect(LLMNative.model({ ...baseModel, api: { ...baseModel.api, url: "", npm: "@ai-sdk/google" } })).toMatchObject({ + route: "gemini", + baseURL: "https://generativelanguage.googleapis.com/v1beta", + }) + expect( + LLMNative.model({ ...baseModel, api: { ...baseModel.api, npm: "@ai-sdk/openai-compatible" } }), + ).toMatchObject({ + route: "openai-compatible-chat", + baseURL: "https://api.openai.com/v1", + }) + expect( + LLMNative.model({ ...baseModel, api: { ...baseModel.api, url: "", npm: "@openrouter/ai-sdk-provider" } }), + ).toMatchObject({ + route: "openrouter", + baseURL: "https://openrouter.ai/api/v1", + }) }) test("fails fast for unsupported provider packages", () => { From a8a58fd61ec54e9383e29dcbafcbbe89adc15ef9 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Mon, 11 May 2026 18:04:02 -0400 Subject: [PATCH 3/3] fix(session): use current native LLM constructors --- packages/opencode/src/session/llm-native.ts | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/packages/opencode/src/session/llm-native.ts b/packages/opencode/src/session/llm-native.ts index 1c359dcf9d50..74be5988ee2f 100644 --- a/packages/opencode/src/session/llm-native.ts +++ b/packages/opencode/src/session/llm-native.ts @@ -1,5 +1,6 @@ -import type { JsonSchema, LLMRequest, ProviderMetadata, ToolDefinition } from "@opencode-ai/llm" -import { LLM } from "@opencode-ai/llm" +import type { JsonSchema, LLMRequest, ProviderMetadata } from "@opencode-ai/llm" +import { LLM, Message, SystemPart, ToolCallPart, ToolDefinition, ToolResultPart } from "@opencode-ai/llm" +import "@opencode-ai/llm/providers" import type { ModelMessage } from "ai" import type { Provider } from "@/provider/provider" @@ -71,7 +72,7 @@ const mediaPart = (part: Record) => { const toolResult = (part: Record) => { const output = isRecord(part.output) ? part.output : { type: "json", value: part.output } const type = output.type === "text" ? "text" : output.type === "error-text" ? "error" : "json" - return LLM.toolResult({ + return ToolResultPart.make({ id: typeof part.toolCallId === "string" ? part.toolCallId : "", name: typeof part.toolName === "string" ? part.toolName : "", result: "value" in output ? 
     resultType: type,
     providerExecuted: typeof part.providerExecuted === "boolean" ? part.providerExecuted : undefined,
     providerMetadata: providerMetadata(part.providerOptions),
   })
 }
@@ -92,7 +93,7 @@ const contentPart = (part: unknown) => {
       providerMetadata: providerMetadata(part.providerOptions),
     }
   if (part.type === "tool-call")
-    return LLM.toolCall({
+    return ToolCallPart.make({
       id: typeof part.toolCallId === "string" ? part.toolCallId : "",
       name: typeof part.toolName === "string" ? part.toolName : "",
       input: part.input,
@@ -104,14 +105,14 @@ const contentPart = (part: unknown) => {
 }
 
 const content = (value: ModelMessage["content"]) =>
-  typeof value === "string" ? [LLM.text(value)] : value.map(contentPart)
+  typeof value === "string" ? [{ type: "text" as const, text: value }] : value.map(contentPart)
 
 const messages = (input: readonly ModelMessage[]) => {
-  const system = input.flatMap((message) => (message.role === "system" ? [LLM.system(message.content)] : []))
+  const system = input.flatMap((message) => (message.role === "system" ? [SystemPart.make(message.content)] : []))
   const messages = input.flatMap((message) => {
     if (message.role === "system") return []
     return [
-      LLM.message({
+      Message.make({
         role: message.role,
         content: content(message.content),
         native: isRecord(message.providerOptions) ? { providerOptions: message.providerOptions } : undefined,
@@ -129,7 +130,7 @@ const schema = (value: unknown): JsonSchema => {
 
 const tools = (input: Record<string, ToolInput> | undefined): ToolDefinition[] =>
   Object.entries(input ?? {}).map(([name, item]) =>
-    LLM.toolDefinition({
+    ToolDefinition.make({
       name,
       description: item.description ?? "",
      inputSchema: schema(item.inputSchema),
@@ -173,7 +174,7 @@ export const request = (input: RequestInput) => {
   const converted = messages(input.messages)
   return LLM.request({
     model: model(input.model, input.headers),
-    system: [...(input.system ?? []).map(LLM.system), ...converted.system],
+    system: [...(input.system ?? []).map(SystemPart.make), ...converted.system],
     messages: converted.messages,
     tools: tools(input.tools),
     toolChoice: input.toolChoice,
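
The constructor migration leaves the calling convention untouched. A minimal
end-to-end sketch of driving the adapter after this series (baseModel is the
test fixture; none of this is asserted by the suite):

  const req = LLMNative.request({
    model: baseModel,
    system: ["agent system"],
    messages: [{ role: "user", content: "hello" }],
  })
  // req.model.route is "openai-responses"; req.system leads with the agent prompt.
  // req.generation is undefined here: generation() collapses to undefined when no
  // sampling parameter is set, leaving temperature and friends to provider defaults.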