Skip to content

Commit d25510b

Browse files
committed
fix(opencode): sanitize history and gate options on target model switch
When a session switches to a different target model, history and request options are now sanitized against that target model's capabilities. Specifically: incompatible reasoning/tool history is stripped for targets that do not support those features; field-based reasoning metadata is preserved for interleaved targets; unsupported reasoning/thinking options are filtered out; and tool resolution plus toolChoice are gated on the target model's tool-call capability. Added regression tests covering a switch from GPT reasoning history to a non-reasoning OpenAI-compatible Mistral model.
1 parent f20ee2f commit d25510b

File tree

4 files changed

+713
-178
lines changed

4 files changed

+713
-178
lines changed

packages/opencode/src/provider/transform.ts

Lines changed: 250 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,193 @@ function mimeToModality(mime: string): Modality | undefined {
2020
export namespace ProviderTransform {
2121
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
2222

23+
function rewrite(
24+
options: Record<string, unknown> | undefined,
25+
fn: (opts: Record<string, unknown>) => Record<string, unknown>,
26+
) {
27+
if (!options) return undefined
28+
const next = fn(options)
29+
return Object.keys(next).length ? next : undefined
30+
}
31+
32+
function clean(options: Record<string, unknown> | undefined, keep?: "reasoning_content" | "reasoning_details") {
33+
return rewrite(options, (opts) => {
34+
const next = { ...opts }
35+
if (keep !== "reasoning_content") delete next.reasoning_content
36+
if (keep !== "reasoning_details") delete next.reasoning_details
37+
return next
38+
})
39+
}
40+
41+
function scrub(options: Record<string, unknown> | undefined, keep?: "reasoning_content" | "reasoning_details") {
42+
return rewrite(options, (opts) =>
43+
Object.fromEntries(
44+
Object.entries(opts).flatMap(([key, value]) => {
45+
if (!value || typeof value !== "object" || Array.isArray(value)) return [[key, value]]
46+
const cleaned = clean(value as Record<string, unknown> | undefined, keep)
47+
if (!cleaned) return []
48+
return [[key, cleaned]]
49+
}),
50+
),
51+
)
52+
}
53+
54+
function patch(options: Record<string, unknown> | undefined, values: Record<string, unknown>) {
55+
return {
56+
...(options ?? {}),
57+
openaiCompatible: {
58+
...(typeof options?.openaiCompatible === "object" && options.openaiCompatible ? options.openaiCompatible : {}),
59+
...values,
60+
},
61+
}
62+
}
63+
64+
function bare<T extends { providerOptions?: unknown }>(msg: T) {
65+
const rest = { ...msg }
66+
delete rest.providerOptions
67+
return rest
68+
}
69+
70+
function mode(model: Provider.Model) {
71+
if (model.capabilities.reasoning) {
72+
if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
73+
return "field"
74+
}
75+
return "parts"
76+
}
77+
if (model.providerID === "anthropic" || model.api.npm === "@ai-sdk/anthropic") {
78+
return "parts"
79+
}
80+
return "strip"
81+
}
82+
83+
// Rewrites message history so it only carries features the target model supports.
// Applied when a session switches models: tool history is dropped for models
// without tool-call support, and reasoning history is converted according to
// mode(model) — moved into a provider-options field, kept as parts, or stripped.
// Messages whose entire content is removed are replaced with a placeholder text
// part (many providers reject empty content) unless nothing was actually lost.
function sanitize(msgs: ModelMessage[], model: Provider.Model) {
  // For interleaved-reasoning targets, the provider-options field that carries
  // reasoning text; scrub() preserves this one key while cleaning the rest.
  const keep =
    model.capabilities.reasoning && typeof model.capabilities.interleaved === "object"
      ? model.capabilities.interleaved.field
      : undefined

  return msgs.flatMap<ModelMessage>((msg) => {
    // Tool-result messages are meaningless to a model that cannot call tools.
    if (msg.role === "tool" && !model.capabilities.toolcall) return []
    // String-content messages: only stale provider options need attention.
    if (!Array.isArray(msg.content)) {
      if (!msg.providerOptions) return [msg]
      const opts = scrub(msg.providerOptions, keep)
      return [
        {
          ...bare(msg),
          ...(opts ? { providerOptions: opts } : {}),
        } as ModelMessage,
      ]
    }

    // `stripped` records that tool parts were removed, so an emptied message can
    // be replaced with a placeholder instead of silently vanishing.
    const stripped =
      !model.capabilities.toolcall &&
      msg.content.some((part) => part.type === "tool-call" || part.type === "tool-result")
    const content = model.capabilities.toolcall
      ? msg.content
      : msg.content.filter((part) => part.type !== "tool-call" && part.type !== "tool-result")
    const opts = scrub(msg.providerOptions, keep)

    if (msg.role === "assistant") {
      const kind = mode(model)
      const reasoning = content.filter(
        (part): part is Extract<(typeof content)[number], { type: "reasoning" }> => part.type === "reasoning",
      )
      const text = reasoning.map((part) => part.text).join("")

      // "field" mode: concatenate reasoning text into the interleaved field on
      // providerOptions and remove the reasoning parts from content.
      if (kind === "field" && typeof model.capabilities.interleaved === "object") {
        const next = content.filter((part) => part.type !== "reasoning")
        const meta = text
          ? patch(opts, {
              [model.capabilities.interleaved.field]: text,
            })
          : opts
        if (next.length > 0) {
          return [
            {
              ...bare(msg),
              content: next,
              ...(meta ? { providerOptions: meta } : {}),
            } as ModelMessage,
          ]
        }
        // Content emptied but nothing was removed: keep the message as-is shape.
        if (!stripped && !reasoning.length) {
          return [
            {
              ...bare(msg),
              ...(meta ? { providerOptions: meta } : {}),
            } as ModelMessage,
          ]
        }
        // Content emptied by stripping: substitute a placeholder text part.
        return [
          {
            ...bare(msg),
            content: [{ type: "text", text: "[Previous content omitted for model compatibility.]" }],
            ...(meta ? { providerOptions: meta } : {}),
          } as ModelMessage,
        ]
      }

      // "strip" mode: discard reasoning parts outright (target neither supports
      // reasoning nor tolerates reasoning parts in history).
      if (kind === "strip") {
        const next = content.filter((part) => part.type !== "reasoning")
        if (next.length > 0) {
          return [
            {
              ...bare(msg),
              content: next,
              ...(opts ? { providerOptions: opts } : {}),
            } as ModelMessage,
          ]
        }
        if (!stripped && reasoning.length === 0) {
          return [
            {
              ...bare(msg),
              ...(opts ? { providerOptions: opts } : {}),
            } as ModelMessage,
          ]
        }
        return [
          {
            ...bare(msg),
            content: [{ type: "text", text: "[Previous content omitted for model compatibility.]" }],
            ...(opts ? { providerOptions: opts } : {}),
          } as ModelMessage,
        ]
      }
    }

    // "parts" mode (Anthropic): reasoning parts are kept as-is in content;
    // the Anthropic SDK handles them natively in the message history.
    if (content.length > 0) {
      return [
        {
          ...bare(msg),
          content,
          ...(opts ? { providerOptions: opts } : {}),
        } as ModelMessage,
      ]
    }

    // Nothing was stripped and content is empty: preserve the (already empty)
    // message rather than inventing a placeholder.
    if (!stripped) {
      return [
        {
          ...bare(msg),
          ...(opts ? { providerOptions: opts } : {}),
        } as ModelMessage,
      ]
    }

    // Tool parts were removed and nothing remains: leave a placeholder so the
    // turn structure of the conversation is preserved.
    return [
      {
        ...bare(msg),
        content: [{ type: "text", text: "[Previous content omitted for model compatibility.]" }],
        ...(opts ? { providerOptions: opts } : {}),
      } as ModelMessage,
    ]
  })
}
209+
23210
// Maps npm package to the key the AI SDK expects for providerOptions
24211
function sdkKey(npm: string): string | undefined {
25212
switch (npm) {
@@ -44,11 +231,7 @@ export namespace ProviderTransform {
44231
return undefined
45232
}
46233

47-
function normalizeMessages(
48-
msgs: ModelMessage[],
49-
model: Provider.Model,
50-
options: Record<string, unknown>,
51-
): ModelMessage[] {
234+
function normalizeMessages(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
52235
// Anthropic rejects messages with empty content - filter out empty string messages
53236
// and remove empty text/reasoning parts from array content
54237
if (model.api.npm === "@ai-sdk/anthropic") {
@@ -133,41 +316,6 @@ export namespace ProviderTransform {
133316
return result
134317
}
135318

136-
if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
137-
const field = model.capabilities.interleaved.field
138-
return msgs.map((msg) => {
139-
if (msg.role === "assistant" && Array.isArray(msg.content)) {
140-
const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning")
141-
const reasoningText = reasoningParts.map((part: any) => part.text).join("")
142-
143-
// Filter out reasoning parts from content
144-
const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning")
145-
146-
// Include reasoning_content | reasoning_details directly on the message for all assistant messages
147-
if (reasoningText) {
148-
return {
149-
...msg,
150-
content: filteredContent,
151-
providerOptions: {
152-
...msg.providerOptions,
153-
openaiCompatible: {
154-
...(msg.providerOptions as any)?.openaiCompatible,
155-
[field]: reasoningText,
156-
},
157-
},
158-
}
159-
}
160-
161-
return {
162-
...msg,
163-
content: filteredContent,
164-
}
165-
}
166-
167-
return msg
168-
})
169-
}
170-
171319
return msgs
172320
}
173321

@@ -249,9 +397,10 @@ export namespace ProviderTransform {
249397
})
250398
}
251399

252-
export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, unknown>) {
400+
export function message(msgs: ModelMessage[], model: Provider.Model) {
401+
msgs = sanitize(msgs, model)
253402
msgs = unsupportedParts(msgs, model)
254-
msgs = normalizeMessages(msgs, model, options)
403+
msgs = normalizeMessages(msgs, model)
255404
if (
256405
(model.providerID === "anthropic" ||
257406
model.api.id.includes("anthropic") ||
@@ -759,26 +908,28 @@ export namespace ProviderTransform {
759908
}
760909

761910
if (input.model.api.id.includes("gpt-5") && !input.model.api.id.includes("gpt-5-chat")) {
762-
if (!input.model.api.id.includes("gpt-5-pro")) {
763-
result["reasoningEffort"] = "medium"
764-
result["reasoningSummary"] = "auto"
765-
}
911+
if (input.model.capabilities.reasoning) {
912+
if (!input.model.api.id.includes("gpt-5-pro")) {
913+
result["reasoningEffort"] = "medium"
914+
result["reasoningSummary"] = "auto"
915+
}
766916

767-
// Only set textVerbosity for non-chat gpt-5.x models
768-
// Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
769-
if (
770-
input.model.api.id.includes("gpt-5.") &&
771-
!input.model.api.id.includes("codex") &&
772-
!input.model.api.id.includes("-chat") &&
773-
input.model.providerID !== "azure"
774-
) {
775-
result["textVerbosity"] = "low"
776-
}
917+
// Only set textVerbosity for non-chat gpt-5.x models
918+
// Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
919+
if (
920+
input.model.api.id.includes("gpt-5.") &&
921+
!input.model.api.id.includes("codex") &&
922+
!input.model.api.id.includes("-chat") &&
923+
input.model.providerID !== "azure"
924+
) {
925+
result["textVerbosity"] = "low"
926+
}
777927

778-
if (input.model.providerID.startsWith("opencode")) {
779-
result["promptCacheKey"] = input.sessionID
780-
result["include"] = ["reasoning.encrypted_content"]
781-
result["reasoningSummary"] = "auto"
928+
if (input.model.providerID.startsWith("opencode")) {
929+
result["promptCacheKey"] = input.sessionID
930+
result["include"] = ["reasoning.encrypted_content"]
931+
result["reasoningSummary"] = "auto"
932+
}
782933
}
783934
}
784935

@@ -795,7 +946,7 @@ export namespace ProviderTransform {
795946
}
796947
}
797948

798-
return result
949+
return compat(input.model, result)
799950
}
800951

801952
export function smallOptions(model: Provider.Model) {
@@ -805,6 +956,7 @@ export namespace ProviderTransform {
805956
model.api.npm === "@ai-sdk/github-copilot"
806957
) {
807958
if (model.api.id.includes("gpt-5")) {
959+
if (!model.capabilities.reasoning) return { store: false }
808960
if (model.api.id.includes("5.")) {
809961
return { store: false, reasoningEffort: "low" }
810962
}
@@ -813,6 +965,7 @@ export namespace ProviderTransform {
813965
return { store: false }
814966
}
815967
if (model.providerID === "google") {
968+
if (!model.capabilities.reasoning) return {}
816969
// gemini-3 uses thinkingLevel, gemini-2.5 uses thinkingBudget
817970
if (model.api.id.includes("gemini-3")) {
818971
return { thinkingConfig: { thinkingLevel: "minimal" } }
@@ -823,16 +976,52 @@ export namespace ProviderTransform {
823976
if (model.api.id.includes("google")) {
824977
return { reasoning: { enabled: false } }
825978
}
979+
if (!model.capabilities.reasoning) return {}
826980
return { reasoningEffort: "minimal" }
827981
}
828982

829983
if (model.providerID === "venice") {
984+
if (!model.capabilities.reasoning) return {}
830985
return { veniceParameters: { disableThinking: true } }
831986
}
832987

833988
return {}
834989
}
835990

991+
export function compat(model: Provider.Model, options: Record<string, unknown>) {
992+
if (model.capabilities.reasoning) return options
993+
994+
const result = { ...options }
995+
delete result.reasoning
996+
delete result.reasoningConfig
997+
delete result.reasoningEffort
998+
delete result.reasoningSummary
999+
delete result.thinking
1000+
delete result.thinkingConfig
1001+
delete result.enable_thinking
1002+
delete result.textVerbosity
1003+
1004+
if (Array.isArray(result.include)) {
1005+
const include = result.include.filter((item) => item !== "reasoning.encrypted_content")
1006+
result.include = include
1007+
if (!include.length) delete result.include
1008+
}
1009+
1010+
if (
1011+
result.chat_template_args &&
1012+
typeof result.chat_template_args === "object" &&
1013+
!Array.isArray(result.chat_template_args)
1014+
) {
1015+
const entries = Object.entries(result.chat_template_args as Record<string, unknown>).filter(
1016+
([key]) => key !== "enable_thinking",
1017+
)
1018+
if (entries.length) result.chat_template_args = Object.fromEntries(entries)
1019+
if (!entries.length) delete result.chat_template_args
1020+
}
1021+
1022+
return result
1023+
}
1024+
8361025
// Maps model ID prefix to provider slug used in providerOptions.
8371026
// Example: "amazon/nova-2-lite" → "bedrock"
8381027
const SLUG_OVERRIDES: Record<string, string> = {

0 commit comments

Comments
 (0)