Skip to content

Commit 64e8056

Browse files
committed
fix(opencode): sanitize history and gate options on target model switch
When a session switches to a different target model, it now sanitizes history and request options against that target model's capabilities. - Strips incompatible reasoning/tool history for targets that do not support those features - Preserves field-based reasoning metadata for interleaved targets - Filters unsupported reasoning/thinking options - Gates tool resolution and toolChoice on the target model's tool-call capability. Added regression tests for switching from GPT reasoning history to a non-reasoning OpenAI-compatible Mistral model.
1 parent b976f33 commit 64e8056

File tree

4 files changed

+713
-178
lines changed

4 files changed

+713
-178
lines changed

packages/opencode/src/provider/transform.ts

Lines changed: 250 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,193 @@ function mimeToModality(mime: string): Modality | undefined {
2020
export namespace ProviderTransform {
2121
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
2222

23+
function rewrite(
24+
options: Record<string, unknown> | undefined,
25+
fn: (opts: Record<string, unknown>) => Record<string, unknown>,
26+
) {
27+
if (!options) return undefined
28+
const next = fn(options)
29+
return Object.keys(next).length ? next : undefined
30+
}
31+
32+
function clean(options: Record<string, unknown> | undefined, keep?: "reasoning_content" | "reasoning_details") {
33+
return rewrite(options, (opts) => {
34+
const next = { ...opts }
35+
if (keep !== "reasoning_content") delete next.reasoning_content
36+
if (keep !== "reasoning_details") delete next.reasoning_details
37+
return next
38+
})
39+
}
40+
41+
function scrub(options: Record<string, unknown> | undefined, keep?: "reasoning_content" | "reasoning_details") {
42+
return rewrite(options, (opts) =>
43+
Object.fromEntries(
44+
Object.entries(opts).flatMap(([key, value]) => {
45+
if (!value || typeof value !== "object" || Array.isArray(value)) return [[key, value]]
46+
const cleaned = clean(value as Record<string, unknown> | undefined, keep)
47+
if (!cleaned) return []
48+
return [[key, cleaned]]
49+
}),
50+
),
51+
)
52+
}
53+
54+
function patch(options: Record<string, unknown> | undefined, values: Record<string, unknown>) {
55+
return {
56+
...(options ?? {}),
57+
openaiCompatible: {
58+
...(typeof options?.openaiCompatible === "object" && options.openaiCompatible ? options.openaiCompatible : {}),
59+
...values,
60+
},
61+
}
62+
}
63+
64+
function bare<T extends { providerOptions?: unknown }>(msg: T) {
65+
const rest = { ...msg }
66+
delete rest.providerOptions
67+
return rest
68+
}
69+
70+
function mode(model: Provider.Model) {
71+
if (model.capabilities.reasoning) {
72+
if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
73+
return "field"
74+
}
75+
return "parts"
76+
}
77+
if (model.providerID === "anthropic" || model.api.npm === "@ai-sdk/anthropic") {
78+
return "parts"
79+
}
80+
return "strip"
81+
}
82+
83+
/**
 * Rewrites a message history so it is compatible with the capabilities
 * of the (possibly switched-to) target model:
 * - drops tool-role messages and tool-call/tool-result parts when the
 *   model lacks tool-call support;
 * - scrubs reasoning metadata (reasoning_content / reasoning_details)
 *   from providerOptions, keeping only the interleaved field the target
 *   model declares, if any;
 * - per `mode(model)`, moves reasoning text into a providerOptions field
 *   ("field"), removes it ("strip"), or keeps it inline ("parts");
 * - messages whose content becomes empty because parts were stripped are
 *   replaced with a placeholder text part rather than emitted empty.
 */
function sanitize(msgs: ModelMessage[], model: Provider.Model) {
  // The single reasoning-metadata key allowed to survive scrubbing; only
  // set for reasoning models with an interleaved field configuration.
  const keep =
    model.capabilities.reasoning && typeof model.capabilities.interleaved === "object"
      ? model.capabilities.interleaved.field
      : undefined

  return msgs.flatMap<ModelMessage>((msg) => {
    // Tool-role messages are meaningless to a model without tool-call support.
    if (msg.role === "tool" && !model.capabilities.toolcall) return []
    // String-content messages: only providerOptions can carry incompatible data.
    if (!Array.isArray(msg.content)) {
      if (!msg.providerOptions) return [msg]
      const opts = scrub(msg.providerOptions, keep)
      return [
        {
          ...bare(msg),
          ...(opts ? { providerOptions: opts } : {}),
        } as ModelMessage,
      ]
    }

    // True when tool parts existed but had to be removed — used later to
    // decide whether an emptied message needs a placeholder.
    const stripped =
      !model.capabilities.toolcall &&
      msg.content.some((part) => part.type === "tool-call" || part.type === "tool-result")
    const content = model.capabilities.toolcall
      ? msg.content
      : msg.content.filter((part) => part.type !== "tool-call" && part.type !== "tool-result")
    const opts = scrub(msg.providerOptions, keep)

    if (msg.role === "assistant") {
      const kind = mode(model)
      const reasoning = content.filter(
        (part): part is Extract<(typeof content)[number], { type: "reasoning" }> => part.type === "reasoning",
      )
      const text = reasoning.map((part) => part.text).join("")

      if (kind === "field" && typeof model.capabilities.interleaved === "object") {
        // Reasoning text is relocated from content parts into the target
        // model's interleaved providerOptions field.
        const next = content.filter((part) => part.type !== "reasoning")
        const meta = text
          ? patch(opts, {
              [model.capabilities.interleaved.field]: text,
            })
          : opts
        if (next.length > 0) {
          return [
            {
              ...bare(msg),
              content: next,
              ...(meta ? { providerOptions: meta } : {}),
            } as ModelMessage,
          ]
        }
        // Content was empty to begin with (nothing stripped, no reasoning):
        // pass the message through without a content override.
        if (!stripped && !reasoning.length) {
          return [
            {
              ...bare(msg),
              ...(meta ? { providerOptions: meta } : {}),
            } as ModelMessage,
          ]
        }
        // Everything in content was removed — substitute a placeholder so
        // the message is not emitted with empty content.
        return [
          {
            ...bare(msg),
            content: [{ type: "text", text: "[Previous content omitted for model compatibility.]" }],
            ...(meta ? { providerOptions: meta } : {}),
          } as ModelMessage,
        ]
      }

      if (kind === "strip") {
        // Reasoning parts are simply discarded for non-reasoning targets.
        const next = content.filter((part) => part.type !== "reasoning")
        if (next.length > 0) {
          return [
            {
              ...bare(msg),
              content: next,
              ...(opts ? { providerOptions: opts } : {}),
            } as ModelMessage,
          ]
        }
        if (!stripped && reasoning.length === 0) {
          return [
            {
              ...bare(msg),
              ...(opts ? { providerOptions: opts } : {}),
            } as ModelMessage,
          ]
        }
        return [
          {
            ...bare(msg),
            content: [{ type: "text", text: "[Previous content omitted for model compatibility.]" }],
            ...(opts ? { providerOptions: opts } : {}),
          } as ModelMessage,
        ]
      }
    }

    // "parts" mode (Anthropic): reasoning parts are kept as-is in content;
    // the Anthropic SDK handles them natively in the message history.
    if (content.length > 0) {
      return [
        {
          ...bare(msg),
          content,
          ...(opts ? { providerOptions: opts } : {}),
        } as ModelMessage,
      ]
    }

    if (!stripped) {
      return [
        {
          ...bare(msg),
          ...(opts ? { providerOptions: opts } : {}),
        } as ModelMessage,
      ]
    }

    return [
      {
        ...bare(msg),
        content: [{ type: "text", text: "[Previous content omitted for model compatibility.]" }],
        ...(opts ? { providerOptions: opts } : {}),
      } as ModelMessage,
    ]
  })
}
209+
23210
// Maps npm package to the key the AI SDK expects for providerOptions
24211
function sdkKey(npm: string): string | undefined {
25212
switch (npm) {
@@ -44,11 +231,7 @@ export namespace ProviderTransform {
44231
return undefined
45232
}
46233

47-
function normalizeMessages(
48-
msgs: ModelMessage[],
49-
model: Provider.Model,
50-
options: Record<string, unknown>,
51-
): ModelMessage[] {
234+
function normalizeMessages(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
52235
// Anthropic rejects messages with empty content - filter out empty string messages
53236
// and remove empty text/reasoning parts from array content
54237
if (model.api.npm === "@ai-sdk/anthropic") {
@@ -133,41 +316,6 @@ export namespace ProviderTransform {
133316
return result
134317
}
135318

136-
if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
137-
const field = model.capabilities.interleaved.field
138-
return msgs.map((msg) => {
139-
if (msg.role === "assistant" && Array.isArray(msg.content)) {
140-
const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning")
141-
const reasoningText = reasoningParts.map((part: any) => part.text).join("")
142-
143-
// Filter out reasoning parts from content
144-
const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning")
145-
146-
// Include reasoning_content | reasoning_details directly on the message for all assistant messages
147-
if (reasoningText) {
148-
return {
149-
...msg,
150-
content: filteredContent,
151-
providerOptions: {
152-
...msg.providerOptions,
153-
openaiCompatible: {
154-
...(msg.providerOptions as any)?.openaiCompatible,
155-
[field]: reasoningText,
156-
},
157-
},
158-
}
159-
}
160-
161-
return {
162-
...msg,
163-
content: filteredContent,
164-
}
165-
}
166-
167-
return msg
168-
})
169-
}
170-
171319
return msgs
172320
}
173321

@@ -249,9 +397,10 @@ export namespace ProviderTransform {
249397
})
250398
}
251399

252-
export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, unknown>) {
400+
export function message(msgs: ModelMessage[], model: Provider.Model) {
401+
msgs = sanitize(msgs, model)
253402
msgs = unsupportedParts(msgs, model)
254-
msgs = normalizeMessages(msgs, model, options)
403+
msgs = normalizeMessages(msgs, model)
255404
if (
256405
(model.providerID === "anthropic" ||
257406
model.api.id.includes("anthropic") ||
@@ -757,26 +906,28 @@ export namespace ProviderTransform {
757906
}
758907

759908
if (input.model.api.id.includes("gpt-5") && !input.model.api.id.includes("gpt-5-chat")) {
760-
if (!input.model.api.id.includes("gpt-5-pro")) {
761-
result["reasoningEffort"] = "medium"
762-
result["reasoningSummary"] = "auto"
763-
}
909+
if (input.model.capabilities.reasoning) {
910+
if (!input.model.api.id.includes("gpt-5-pro")) {
911+
result["reasoningEffort"] = "medium"
912+
result["reasoningSummary"] = "auto"
913+
}
764914

765-
// Only set textVerbosity for non-chat gpt-5.x models
766-
// Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
767-
if (
768-
input.model.api.id.includes("gpt-5.") &&
769-
!input.model.api.id.includes("codex") &&
770-
!input.model.api.id.includes("-chat") &&
771-
input.model.providerID !== "azure"
772-
) {
773-
result["textVerbosity"] = "low"
774-
}
915+
// Only set textVerbosity for non-chat gpt-5.x models
916+
// Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
917+
if (
918+
input.model.api.id.includes("gpt-5.") &&
919+
!input.model.api.id.includes("codex") &&
920+
!input.model.api.id.includes("-chat") &&
921+
input.model.providerID !== "azure"
922+
) {
923+
result["textVerbosity"] = "low"
924+
}
775925

776-
if (input.model.providerID.startsWith("opencode")) {
777-
result["promptCacheKey"] = input.sessionID
778-
result["include"] = ["reasoning.encrypted_content"]
779-
result["reasoningSummary"] = "auto"
926+
if (input.model.providerID.startsWith("opencode")) {
927+
result["promptCacheKey"] = input.sessionID
928+
result["include"] = ["reasoning.encrypted_content"]
929+
result["reasoningSummary"] = "auto"
930+
}
780931
}
781932
}
782933

@@ -793,7 +944,7 @@ export namespace ProviderTransform {
793944
}
794945
}
795946

796-
return result
947+
return compat(input.model, result)
797948
}
798949

799950
export function smallOptions(model: Provider.Model) {
@@ -803,6 +954,7 @@ export namespace ProviderTransform {
803954
model.api.npm === "@ai-sdk/github-copilot"
804955
) {
805956
if (model.api.id.includes("gpt-5")) {
957+
if (!model.capabilities.reasoning) return { store: false }
806958
if (model.api.id.includes("5.")) {
807959
return { store: false, reasoningEffort: "low" }
808960
}
@@ -811,6 +963,7 @@ export namespace ProviderTransform {
811963
return { store: false }
812964
}
813965
if (model.providerID === "google") {
966+
if (!model.capabilities.reasoning) return {}
814967
// gemini-3 uses thinkingLevel, gemini-2.5 uses thinkingBudget
815968
if (model.api.id.includes("gemini-3")) {
816969
return { thinkingConfig: { thinkingLevel: "minimal" } }
@@ -821,16 +974,52 @@ export namespace ProviderTransform {
821974
if (model.api.id.includes("google")) {
822975
return { reasoning: { enabled: false } }
823976
}
977+
if (!model.capabilities.reasoning) return {}
824978
return { reasoningEffort: "minimal" }
825979
}
826980

827981
if (model.providerID === "venice") {
982+
if (!model.capabilities.reasoning) return {}
828983
return { veniceParameters: { disableThinking: true } }
829984
}
830985

831986
return {}
832987
}
833988

989+
export function compat(model: Provider.Model, options: Record<string, unknown>) {
990+
if (model.capabilities.reasoning) return options
991+
992+
const result = { ...options }
993+
delete result.reasoning
994+
delete result.reasoningConfig
995+
delete result.reasoningEffort
996+
delete result.reasoningSummary
997+
delete result.thinking
998+
delete result.thinkingConfig
999+
delete result.enable_thinking
1000+
delete result.textVerbosity
1001+
1002+
if (Array.isArray(result.include)) {
1003+
const include = result.include.filter((item) => item !== "reasoning.encrypted_content")
1004+
result.include = include
1005+
if (!include.length) delete result.include
1006+
}
1007+
1008+
if (
1009+
result.chat_template_args &&
1010+
typeof result.chat_template_args === "object" &&
1011+
!Array.isArray(result.chat_template_args)
1012+
) {
1013+
const entries = Object.entries(result.chat_template_args as Record<string, unknown>).filter(
1014+
([key]) => key !== "enable_thinking",
1015+
)
1016+
if (entries.length) result.chat_template_args = Object.fromEntries(entries)
1017+
if (!entries.length) delete result.chat_template_args
1018+
}
1019+
1020+
return result
1021+
}
1022+
8341023
// Maps model ID prefix to provider slug used in providerOptions.
8351024
// Example: "amazon/nova-2-lite" → "bedrock"
8361025
const SLUG_OVERRIDES: Record<string, string> = {

0 commit comments

Comments
 (0)