Skip to content

Commit 57843a2

Browse files
committed
fix: align async hooks and tool parsing
Ensure async call paths run before/after hooks with the original messages and skip tool execution when argument parsing fails, matching streaming behavior.
1 parent c61aa69 commit 57843a2

1 file changed

Lines changed: 21 additions & 6 deletions

File tree

  • lib/crewai/src/crewai/llms/providers/openai_responses

lib/crewai/src/crewai/llms/providers/openai_responses/completion.py

Lines changed: 21 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,7 @@ def call(
273273
if self.stream:
274274
return self._handle_streaming_response(
275275
params=params,
276+
formatted_messages=formatted_messages,
276277
available_functions=available_functions,
277278
from_task=from_task,
278279
from_agent=from_agent,
@@ -281,6 +282,7 @@ def call(
281282

282283
return self._handle_response(
283284
params=params,
285+
formatted_messages=formatted_messages,
284286
available_functions=available_functions,
285287
from_task=from_task,
286288
from_agent=from_agent,
@@ -335,6 +337,9 @@ async def acall(
335337

336338
formatted_messages = self._format_messages(messages)
337339

340+
if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
341+
raise ValueError("LLM call blocked by before_llm_call hook")
342+
338343
params = self._prepare_responses_params(
339344
messages=formatted_messages,
340345
tools=tools,
@@ -344,6 +349,7 @@ async def acall(
344349
if self.stream:
345350
return await self._ahandle_streaming_response(
346351
params=params,
352+
formatted_messages=formatted_messages,
347353
available_functions=available_functions,
348354
from_task=from_task,
349355
from_agent=from_agent,
@@ -352,6 +358,7 @@ async def acall(
352358

353359
return await self._ahandle_response(
354360
params=params,
361+
formatted_messages=formatted_messages,
355362
available_functions=available_functions,
356363
from_task=from_task,
357364
from_agent=from_agent,
@@ -501,6 +508,7 @@ def _convert_tools_for_responses(
501508
def _handle_response(
502509
self,
503510
params: dict[str, Any],
511+
formatted_messages: list[LLMMessage],
504512
available_functions: dict[str, Any] | None = None,
505513
from_task: Task | None = None,
506514
from_agent: Agent | None = None,
@@ -540,7 +548,7 @@ def _handle_response(
540548
function_args = json.loads(output_item.arguments)
541549
except json.JSONDecodeError as e:
542550
logging.error(f"Failed to parse tool arguments: {e}")
543-
function_args = {}
551+
continue
544552

545553
result = self._handle_tool_execution(
546554
function_name=function_name,
@@ -580,14 +588,15 @@ def _handle_response(
580588
)
581589

582590
content = self._invoke_after_llm_call_hooks(
583-
params.get("messages", []), content, from_agent
591+
formatted_messages, content, from_agent
584592
)
585593

586594
return content
587595

588596
async def _ahandle_response(
589597
self,
590598
params: dict[str, Any],
599+
formatted_messages: list[LLMMessage],
591600
available_functions: dict[str, Any] | None = None,
592601
from_task: Task | None = None,
593602
from_agent: Agent | None = None,
@@ -627,7 +636,7 @@ async def _ahandle_response(
627636
function_args = json.loads(output_item.arguments)
628637
except json.JSONDecodeError as e:
629638
logging.error(f"Failed to parse tool arguments: {e}")
630-
function_args = {}
639+
continue
631640

632641
result = self._handle_tool_execution(
633642
function_name=function_name,
@@ -666,11 +675,14 @@ async def _ahandle_response(
666675
from_agent=from_agent,
667676
)
668677

669-
return content
678+
return self._invoke_after_llm_call_hooks(
679+
formatted_messages, content, from_agent
680+
)
670681

671682
def _handle_streaming_response(
672683
self,
673684
params: dict[str, Any],
685+
formatted_messages: list[LLMMessage],
674686
available_functions: dict[str, Any] | None = None,
675687
from_task: Task | None = None,
676688
from_agent: Agent | None = None,
@@ -829,12 +841,13 @@ def _handle_streaming_response(
829841
)
830842

831843
return self._invoke_after_llm_call_hooks(
832-
params.get("messages", []), full_response, from_agent
844+
formatted_messages, full_response, from_agent
833845
)
834846

835847
async def _ahandle_streaming_response(
836848
self,
837849
params: dict[str, Any],
850+
formatted_messages: list[LLMMessage],
838851
available_functions: dict[str, Any] | None = None,
839852
from_task: Task | None = None,
840853
from_agent: Agent | None = None,
@@ -992,7 +1005,9 @@ async def _ahandle_streaming_response(
9921005
from_agent=from_agent,
9931006
)
9941007

995-
return full_response
1008+
return self._invoke_after_llm_call_hooks(
1009+
formatted_messages, full_response, from_agent
1010+
)
9961011

9971012
def supports_function_calling(self) -> bool:
9981013
"""Check if the model supports function calling."""

0 commit comments

Comments (0)