Skip to content

Commit d6e30de

Browse files
valenvivaldi authored and rekram1-node
committed
fix(core): prevent agent loop from stopping after tool calls with OpenAI-compatible providers (anomalyco#14973)
Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
Co-authored-by: Aiden Cline <aidenpcline@gmail.com>
1 parent 9762330 commit d6e30de

2 files changed

Lines changed: 40 additions & 2 deletions

File tree

packages/opencode/src/session/prompt.ts

Lines changed: 9 additions & 0 deletions
```diff
@@ -1362,9 +1362,18 @@ NOTE: At any point in time through this workflow you should feel free to ask the
       }

       if (!lastUser) throw new Error("No user message found in stream. This should never happen.")
+
+      const lastAssistantMsg = msgs.findLast(
+        (msg) => msg.info.role === "assistant" && msg.info.id === lastAssistant?.id,
+      )
+      // Some providers return "stop" even when the assistant message contains tool calls.
+      // Keep the loop running so tool results can be sent back to the model.
+      const hasToolCalls = lastAssistantMsg?.parts.some((part) => part.type === "tool") ?? false
+
       if (
         lastAssistant?.finish &&
         !["tool-calls"].includes(lastAssistant.finish) &&
+        !hasToolCalls &&
         lastUser.id < lastAssistant.id
       ) {
         log.info("exiting loop", { sessionID })
```

packages/opencode/test/session/prompt-effect.test.ts

Lines changed: 31 additions & 2 deletions
```diff
@@ -3,7 +3,6 @@ import { expect, spyOn } from "bun:test"
 import { Cause, Effect, Exit, Fiber, Layer } from "effect"
 import path from "path"
 import z from "zod"
-import type { Agent } from "../../src/agent/agent"
 import { Agent as AgentSvc } from "../../src/agent/agent"
 import { Bus } from "../../src/bus"
 import { Command } from "../../src/command"
@@ -35,7 +34,7 @@ import { Log } from "../../src/util/log"
 import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
 import { provideTmpdirInstance, provideTmpdirServer } from "../fixture/fixture"
 import { testEffect } from "../lib/effect"
-import { TestLLMServer } from "../lib/llm-server"
+import { reply, TestLLMServer } from "../lib/llm-server"

 Log.init({ print: false })

@@ -453,6 +452,36 @@ it.live("loop continues when finish is tool-calls", () =>
   ),
 )

+it.live("loop continues when finish is stop but assistant has tool parts", () =>
+  provideTmpdirServer(
+    Effect.fnUntraced(function* ({ llm }) {
+      const prompt = yield* SessionPrompt.Service
+      const sessions = yield* Session.Service
+      const session = yield* sessions.create({
+        title: "Pinned",
+        permission: [{ permission: "*", pattern: "*", action: "allow" }],
+      })
+      yield* prompt.prompt({
+        sessionID: session.id,
+        agent: "build",
+        noReply: true,
+        parts: [{ type: "text", text: "hello" }],
+      })
+      yield* llm.push(reply().tool("first", { value: "first" }).stop())
+      yield* llm.text("second")
+
+      const result = yield* prompt.loop({ sessionID: session.id })
+      expect(yield* llm.calls).toBe(2)
+      expect(result.info.role).toBe("assistant")
+      if (result.info.role === "assistant") {
+        expect(result.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true)
+        expect(result.info.finish).toBe("stop")
+      }
+    }),
+    { git: true, config: providerCfg },
+  ),
+)
+
 it.live("failed subtask preserves metadata on error tool state", () =>
   provideTmpdirServer(
     Effect.fnUntraced(function* ({ llm }) {
```

0 commit comments

Comments
 (0)