Skip to content

Commit 12d46c4

Browse files
R44VC0RP and Colin4k1024
authored and committed
fix(provider): exclude chat models from textVerbosity setting (anomalyco#11363)
1 parent 58ace74 commit 12d46c4

File tree

2 files changed

+75
-0
lines changed

2 files changed

+75
-0
lines changed

packages/opencode/src/provider/transform.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -594,9 +594,12 @@ export namespace ProviderTransform {
594594
result["reasoningEffort"] = "medium"
595595
}
596596

597+
// Only set textVerbosity for non-chat gpt-5.x models
598+
// Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
597599
if (
598600
input.model.api.id.includes("gpt-5.") &&
599601
!input.model.api.id.includes("codex") &&
602+
!input.model.api.id.includes("-chat") &&
600603
input.model.providerID !== "azure"
601604
) {
602605
result["textVerbosity"] = "low"

packages/opencode/test/provider/transform.test.ts

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,78 @@ describe("ProviderTransform.options - setCacheKey", () => {
103103
})
104104
})
105105

106+
describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
107+
const sessionID = "test-session-123"
108+
109+
const createGpt5Model = (apiId: string) =>
110+
({
111+
id: `openai/${apiId}`,
112+
providerID: "openai",
113+
api: {
114+
id: apiId,
115+
url: "https://api.openai.com",
116+
npm: "@ai-sdk/openai",
117+
},
118+
name: apiId,
119+
capabilities: {
120+
temperature: true,
121+
reasoning: true,
122+
attachment: true,
123+
toolcall: true,
124+
input: { text: true, audio: false, image: true, video: false, pdf: false },
125+
output: { text: true, audio: false, image: false, video: false, pdf: false },
126+
interleaved: false,
127+
},
128+
cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
129+
limit: { context: 128000, output: 4096 },
130+
status: "active",
131+
options: {},
132+
headers: {},
133+
}) as any
134+
135+
test("gpt-5.2 should have textVerbosity set to low", () => {
136+
const model = createGpt5Model("gpt-5.2")
137+
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
138+
expect(result.textVerbosity).toBe("low")
139+
})
140+
141+
test("gpt-5.1 should have textVerbosity set to low", () => {
142+
const model = createGpt5Model("gpt-5.1")
143+
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
144+
expect(result.textVerbosity).toBe("low")
145+
})
146+
147+
test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
148+
const model = createGpt5Model("gpt-5.2-chat-latest")
149+
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
150+
expect(result.textVerbosity).toBeUndefined()
151+
})
152+
153+
test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
154+
const model = createGpt5Model("gpt-5.1-chat-latest")
155+
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
156+
expect(result.textVerbosity).toBeUndefined()
157+
})
158+
159+
test("gpt-5.2-chat should NOT have textVerbosity set", () => {
160+
const model = createGpt5Model("gpt-5.2-chat")
161+
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
162+
expect(result.textVerbosity).toBeUndefined()
163+
})
164+
165+
test("gpt-5-chat should NOT have textVerbosity set", () => {
166+
const model = createGpt5Model("gpt-5-chat")
167+
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
168+
expect(result.textVerbosity).toBeUndefined()
169+
})
170+
171+
test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
172+
const model = createGpt5Model("gpt-5.2-codex")
173+
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
174+
expect(result.textVerbosity).toBeUndefined()
175+
})
176+
})
177+
106178
describe("ProviderTransform.maxOutputTokens", () => {
107179
test("returns 32k when modelLimit > 32k", () => {
108180
const modelLimit = 100000

0 commit comments

Comments
 (0)