Skip to content

Commit 8e93ce2

Browse files
docs: update OpenAI model examples from gpt-4o to gpt-5.2 (#222)
Update documentation examples to use gpt-5.2 instead of gpt-4o, and gpt-5-mini instead of gpt-4o-mini for summarization examples. Audio/transcription docs were intentionally left unchanged as they reference specialized audio models (gpt-4o-audio-preview, whisper-1, etc.) that have different naming conventions.
1 parent 0e37d8b commit 8e93ce2

30 files changed

Lines changed: 131 additions & 131 deletions

docs/adapters/openai.md

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ import { chat } from "@tanstack/ai";
1919
import { openaiText } from "@tanstack/ai-openai";
2020

2121
const stream = chat({
22-
adapter: openaiText("gpt-4o"),
22+
adapter: openaiText("gpt-5.2"),
2323
messages: [{ role: "user", content: "Hello!" }],
2424
});
2525
```
@@ -35,7 +35,7 @@ const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, {
3535
});
3636

3737
const stream = chat({
38-
adapter: adapter("gpt-4o"),
38+
adapter: adapter("gpt-5.2"),
3939
messages: [{ role: "user", content: "Hello!" }],
4040
});
4141
```
@@ -63,7 +63,7 @@ export async function POST(request: Request) {
6363
const { messages } = await request.json();
6464

6565
const stream = chat({
66-
adapter: openaiText("gpt-4o"),
66+
adapter: openaiText("gpt-5.2"),
6767
messages,
6868
});
6969

@@ -92,7 +92,7 @@ const getWeather = getWeatherDef.server(async ({ location }) => {
9292
});
9393

9494
const stream = chat({
95-
adapter: openaiText("gpt-4o"),
95+
adapter: openaiText("gpt-5.2"),
9696
messages,
9797
tools: [getWeather],
9898
});
@@ -104,7 +104,7 @@ OpenAI supports various provider-specific options:
104104

105105
```typescript
106106
const stream = chat({
107-
adapter: openaiText("gpt-4o"),
107+
adapter: openaiText("gpt-5.2"),
108108
messages,
109109
modelOptions: {
110110
temperature: 0.7,
@@ -141,7 +141,7 @@ import { summarize } from "@tanstack/ai";
141141
import { openaiSummarize } from "@tanstack/ai-openai";
142142

143143
const result = await summarize({
144-
adapter: openaiSummarize("gpt-4o-mini"),
144+
adapter: openaiSummarize("gpt-5-mini"),
145145
text: "Your long text to summarize...",
146146
maxLength: 100,
147147
style: "concise", // "concise" | "bullet-points" | "paragraph"

docs/api/ai.md

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ import { chat } from "@tanstack/ai";
2121
import { openaiText } from "@tanstack/ai-openai";
2222

2323
const stream = chat({
24-
adapter: openaiText("gpt-4o"),
24+
adapter: openaiText("gpt-5.2"),
2525
messages: [{ role: "user", content: "Hello!" }],
2626
tools: [myTool],
2727
systemPrompts: ["You are a helpful assistant"],
@@ -31,7 +31,7 @@ const stream = chat({
3131

3232
### Parameters
3333

34-
- `adapter` - An AI adapter instance with model (e.g., `openaiText('gpt-4o')`, `anthropicText('claude-sonnet-4-5')`)
34+
- `adapter` - An AI adapter instance with model (e.g., `openaiText('gpt-5.2')`, `anthropicText('claude-sonnet-4-5')`)
3535
- `messages` - Array of chat messages
3636
- `tools?` - Array of tools for function calling
3737
- `systemPrompts?` - System prompts to prepend to messages
@@ -52,7 +52,7 @@ import { summarize } from "@tanstack/ai";
5252
import { openaiSummarize } from "@tanstack/ai-openai";
5353

5454
const result = await summarize({
55-
adapter: openaiSummarize("gpt-4o"),
55+
adapter: openaiSummarize("gpt-5.2"),
5656
text: "Long text to summarize...",
5757
maxLength: 100,
5858
style: "concise",
@@ -99,7 +99,7 @@ const myClientTool = myToolDef.client(async ({ param }) => {
9999

100100
// Use directly in chat() (server-side, no execute)
101101
chat({
102-
adapter: openaiText("gpt-4o"),
102+
adapter: openaiText("gpt-5.2"),
103103
tools: [myToolDef],
104104
messages: [{ role: "user", content: "..." }],
105105
});
@@ -112,7 +112,7 @@ const myServerTool = myToolDef.server(async ({ param }) => {
112112

113113
// Use directly in chat() (server-side, no execute)
114114
chat({
115-
adapter: openaiText("gpt-4o"),
115+
adapter: openaiText("gpt-5.2"),
116116
tools: [myServerTool],
117117
messages: [{ role: "user", content: "..." }],
118118
});
@@ -140,7 +140,7 @@ import { chat, toServerSentEventsStream } from "@tanstack/ai";
140140
import { openaiText } from "@tanstack/ai-openai";
141141

142142
const stream = chat({
143-
adapter: openaiText("gpt-4o"),
143+
adapter: openaiText("gpt-5.2"),
144144
messages: [...],
145145
});
146146
const readableStream = toServerSentEventsStream(stream);
@@ -167,7 +167,7 @@ import { chat, toServerSentEventsResponse } from "@tanstack/ai";
167167
import { openaiText } from "@tanstack/ai-openai";
168168

169169
const stream = chat({
170-
adapter: openaiText("gpt-4o"),
170+
adapter: openaiText("gpt-5.2"),
171171
messages: [...],
172172
});
173173
return toServerSentEventsResponse(stream);
@@ -191,7 +191,7 @@ import { chat, maxIterations } from "@tanstack/ai";
191191
import { openaiText } from "@tanstack/ai-openai";
192192

193193
const stream = chat({
194-
adapter: openaiText("gpt-4o"),
194+
adapter: openaiText("gpt-5.2"),
195195
messages: [...],
196196
agentLoopStrategy: maxIterations(20),
197197
});
@@ -274,21 +274,21 @@ import {
274274

275275
// --- Streaming chat
276276
const stream = chat({
277-
adapter: openaiText("gpt-4o"),
277+
adapter: openaiText("gpt-5.2"),
278278
messages: [{ role: "user", content: "Hello!" }],
279279
});
280280

281281
// --- One-shot chat response (stream: false)
282282
const response = await chat({
283-
adapter: openaiText("gpt-4o"),
283+
adapter: openaiText("gpt-5.2"),
284284
messages: [{ role: "user", content: "What's the capital of France?" }],
285285
stream: false, // Returns a Promise<string> instead of AsyncIterable
286286
});
287287

288288
// --- Structured response with outputSchema
289289
import { z } from "zod";
290290
const parsed = await chat({
291-
adapter: openaiText("gpt-4o"),
291+
adapter: openaiText("gpt-5.2"),
292292
messages: [{ role: "user", content: "Summarize this text in JSON with keys 'summary' and 'keywords': ... " }],
293293
outputSchema: z.object({
294294
summary: z.string(),
@@ -310,7 +310,7 @@ const weatherTool = toolDefinition({
310310
});
311311

312312
const toolResult = await chat({
313-
adapter: openaiText("gpt-4o"),
313+
adapter: openaiText("gpt-5.2"),
314314
messages: [
315315
{ role: "user", content: "What's the weather in Paris?" }
316316
],
@@ -326,7 +326,7 @@ const toolResult = await chat({
326326

327327
// --- Summarization
328328
const summary = await summarize({
329-
adapter: openaiSummarize("gpt-4o"),
329+
adapter: openaiSummarize("gpt-5.2"),
330330
text: "Long text to summarize...",
331331
maxLength: 100,
332332
});

docs/getting-started/overview.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ const getProducts = getProductsDef.server(async ({ query }) => {
4646

4747
// Use in AI chat
4848
chat({
49-
adapter: openaiText('gpt-4o'),
49+
adapter: openaiText('gpt-5.2'),
5050
messages: [{ role: 'user', content: 'Find products' }],
5151
tools: [getProducts]
5252
})

docs/getting-started/quick-start.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ export const Route = createFileRoute("/api/chat")({
5151
const stream = chat({
5252
adapter: openai(),
5353
messages,
54-
model: "gpt-4o",
54+
model: "gpt-5.2",
5555
conversationId,
5656
});
5757

@@ -100,7 +100,7 @@ export async function POST(request: Request) {
100100
try {
101101
// Create a streaming chat response
102102
const stream = chat({
103-
adapter: openaiText("gpt-4o"),
103+
adapter: openaiText("gpt-5.2"),
104104
messages,
105105
conversationId
106106
});
@@ -248,7 +248,7 @@ const getProducts = getProductsDef.server(async ({ query }) => {
248248
})
249249

250250
chat({
251-
adapter: openaiText('gpt-4o'),
251+
adapter: openaiText('gpt-5.2'),
252252
messages: [{ role: 'user', content: 'Find products' }],
253253
tools: [getProducts]
254254
})

docs/guides/agentic-cycle.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ export async function POST(request: Request) {
123123
const { messages } = await request.json();
124124

125125
const stream = chat({
126-
adapter: openaiText("gpt-4o"),
126+
adapter: openaiText("gpt-5.2"),
127127
messages,
128128
tools: [getWeather, getClothingAdvice],
129129
});

docs/guides/client-tools.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ export async function POST(request: Request) {
102102
const { messages } = await request.json();
103103

104104
const stream = chat({
105-
adapter: openaiText("gpt-4o"),
105+
adapter: openaiText("gpt-5.2"),
106106
messages,
107107
tools: [updateUIDef, saveToLocalStorageDef], // Pass definitions
108108
});
@@ -297,10 +297,10 @@ const addToCartClient = addToCartDef.client((input) => {
297297
});
298298

299299
// Server: Pass definition for client execution
300-
chat({ adapter: openaiText('gpt-4o'), messages: [], tools: [addToCartDef] }); // Client will execute
300+
chat({ adapter: openaiText('gpt-5.2'), messages: [], tools: [addToCartDef] }); // Client will execute
301301

302302
// Or pass server implementation for server execution
303-
chat({ adapter: openaiText('gpt-4o'), messages: [], tools: [addToCartServer] }); // Server will execute
303+
chat({ adapter: openaiText('gpt-5.2'), messages: [], tools: [addToCartServer] }); // Server will execute
304304
```
305305

306306
## Best Practices

docs/guides/migration.md

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ import { openai } from '@tanstack/ai-openai'
3030

3131
const stream = chat({
3232
adapter: openai(),
33-
model: 'gpt-4o',
33+
model: 'gpt-5.2',
3434
messages: [{ role: 'user', content: 'Hello!' }],
3535
})
3636
```
@@ -42,14 +42,14 @@ import { chat } from '@tanstack/ai'
4242
import { openaiText } from '@tanstack/ai-openai'
4343

4444
const stream = chat({
45-
adapter: openaiText('gpt-4o'),
45+
adapter: openaiText('gpt-5.2'),
4646
messages: [{ role: 'user', content: 'Hello!' }],
4747
})
4848
```
4949

5050
### Key Changes
5151

52-
- **Model is passed to adapter factory** - The model name is now passed directly to the adapter function (e.g., `openaiText('gpt-4o')`)
52+
- **Model is passed to adapter factory** - The model name is now passed directly to the adapter function (e.g., `openaiText('gpt-5.2')`)
5353
- **No separate `model` parameter** - The model is stored on the adapter, so you don't need to pass it separately to `chat()`
5454
- **Activity-specific imports** - Import only what you need (e.g., `openaiText`, `openaiSummarize`, `openaiImage`)
5555

@@ -123,7 +123,7 @@ function getAdapter(provider: Provider) {
123123

124124
const stream = chat({
125125
adapter: getAdapter(provider),
126-
model: provider === 'openai' ? 'gpt-4o' : 'claude-sonnet-4-5',
126+
model: provider === 'openai' ? 'gpt-5.2' : 'claude-sonnet-4-5',
127127
messages,
128128
})
129129
```
@@ -138,7 +138,7 @@ import { anthropicText } from '@tanstack/ai-anthropic'
138138
type Provider = 'openai' | 'anthropic'
139139

140140
const adapters = {
141-
openai: () => openaiText('gpt-4o'),
141+
openai: () => openaiText('gpt-5.2'),
142142
anthropic: () => anthropicText('claude-sonnet-4-5'),
143143
}
144144

@@ -157,7 +157,7 @@ Common options that were previously nested in an `options` object are now flatte
157157
```typescript
158158
const stream = chat({
159159
adapter: openai(),
160-
model: 'gpt-4o',
160+
model: 'gpt-5.2',
161161
messages,
162162
options: {
163163
temperature: 0.7,
@@ -171,7 +171,7 @@ const stream = chat({
171171

172172
```typescript
173173
const stream = chat({
174-
adapter: openaiText('gpt-4o'),
174+
adapter: openaiText('gpt-5.2'),
175175
messages,
176176
temperature: 0.7,
177177
maxTokens: 1000,
@@ -197,7 +197,7 @@ The `providerOptions` parameter has been renamed to `modelOptions` for clarity.
197197
```typescript
198198
const stream = chat({
199199
adapter: openai(),
200-
model: 'gpt-4o',
200+
model: 'gpt-5.2',
201201
messages,
202202
providerOptions: {
203203
// OpenAI-specific options
@@ -211,7 +211,7 @@ const stream = chat({
211211

212212
```typescript
213213
const stream = chat({
214-
adapter: openaiText('gpt-4o'),
214+
adapter: openaiText('gpt-5.2'),
215215
messages,
216216
modelOptions: {
217217
// OpenAI-specific options
@@ -228,14 +228,14 @@ const stream = chat({
228228
```typescript
229229
import { openaiText } from '@tanstack/ai-openai'
230230

231-
const adapter = openaiText('gpt-4o')
231+
const adapter = openaiText('gpt-5.2')
232232

233-
// TypeScript knows the exact modelOptions type for gpt-4o
233+
// TypeScript knows the exact modelOptions type for gpt-5.2
234234
const stream = chat({
235235
adapter,
236236
messages,
237237
modelOptions: {
238-
// Autocomplete and type checking for gpt-4o options
238+
// Autocomplete and type checking for gpt-5.2 options
239239
responseFormat: { type: 'json_object' },
240240
},
241241
})
@@ -257,7 +257,7 @@ export async function POST(request: Request) {
257257

258258
const stream = chat({
259259
adapter: openai(),
260-
model: 'gpt-4o',
260+
model: 'gpt-5.2',
261261
messages,
262262
abortController,
263263
})
@@ -277,7 +277,7 @@ export async function POST(request: Request) {
277277
const abortController = new AbortController()
278278

279279
const stream = chat({
280-
adapter: openaiText('gpt-4o'),
280+
adapter: openaiText('gpt-5.2'),
281281
messages,
282282
abortController,
283283
})
@@ -371,7 +371,7 @@ export async function POST(request: Request) {
371371

372372
const stream = chat({
373373
adapter: openai(),
374-
model: 'gpt-4o',
374+
model: 'gpt-5.2',
375375
messages,
376376
options: {
377377
temperature: 0.7,
@@ -398,7 +398,7 @@ export async function POST(request: Request) {
398398
const abortController = new AbortController()
399399

400400
const stream = chat({
401-
adapter: openaiText('gpt-4o'),
401+
adapter: openaiText('gpt-5.2'),
402402
messages,
403403
temperature: 0.7,
404404
maxTokens: 1000,

0 commit comments

Comments (0)