|
8 | 8 |
|
9 | 9 | from fastapi import APIRouter, Depends, Request |
10 | 10 | from llama_stack.apis.agents.openai_responses import ( |
| 11 | + OpenAIResponseMCPApprovalRequest, |
| 12 | + OpenAIResponseMCPApprovalResponse, |
11 | 13 | OpenAIResponseObject, |
| 14 | + OpenAIResponseOutput, |
| 15 | + OpenAIResponseOutputMessageFileSearchToolCall, |
| 16 | + OpenAIResponseOutputMessageFunctionToolCall, |
| 17 | + OpenAIResponseOutputMessageMCPCall, |
| 18 | + OpenAIResponseOutputMessageMCPListTools, |
| 19 | + OpenAIResponseOutputMessageWebSearchToolCall, |
12 | 20 | ) |
13 | 21 | from llama_stack_client import AsyncLlamaStackClient |
14 | 22 |
|
|
41 | 49 | get_topic_summary_system_prompt, |
42 | 50 | ) |
43 | 51 | from utils.mcp_headers import mcp_headers_dependency |
| 52 | +from utils.query import parse_arguments_string |
44 | 53 | from utils.responses import extract_text_from_response_output_item |
45 | 54 | from utils.shields import ( |
46 | 55 | append_turn_to_conversation, |
|
73 | 82 |
|
74 | 83 |
|
def _build_tool_call_summary(  # pylint: disable=too-many-return-statements,too-many-branches
    output_item: OpenAIResponseOutput,
) -> tuple[Optional[ToolCallSummary], Optional[ToolResultSummary]]:
    """Translate Responses API tool outputs into ToolCallSummary and ToolResultSummary records.

    Processes one item from the OpenAI ``response.output`` array and extracts
    tool-call and tool-result information from it.

    Args:
        output_item: An OpenAIResponseOutput item from the response.output array.

    Returns:
        A tuple of (ToolCallSummary, ToolResultSummary); either element may be
        None when the current llama-stack Responses API does not provide that
        side of the information. Unrecognized item types yield (None, None).

    Supported tool types:
        - function_call: function tool calls with parsed arguments (no result)
        - file_search_call: file search operations with results
        - web_search_call: web search operations (incomplete in LLS)
        - mcp_call: MCP calls with server labels
        - mcp_list_tools: MCP server tool listings
        - mcp_approval_request: MCP approval requests (no result)
        - mcp_approval_response: MCP approval responses (no call)
    """
    item_type = getattr(output_item, "type", None)

    if item_type == "function_call":
        item = cast(OpenAIResponseOutputMessageFunctionToolCall, output_item)
        return (
            ToolCallSummary(
                id=item.call_id,
                name=item.name,
                args=parse_arguments_string(item.arguments),
                type="function_call",
            ),
            None,  # function-call results are not supported by the Responses API at all
        )

    if item_type == "file_search_call":
        item = cast(OpenAIResponseOutputMessageFileSearchToolCall, output_item)
        response_payload: Optional[dict[str, Any]] = None
        if item.results is not None:
            response_payload = {
                "results": [result.model_dump() for result in item.results]
            }
        return ToolCallSummary(
            id=item.id,
            name=DEFAULT_RAG_TOOL,
            args={"queries": item.queries},
            type="file_search_call",
        ), ToolResultSummary(
            id=item.id,
            status=item.status,
            # Empty content (not JSON "null") when the API returned no results.
            content=json.dumps(response_payload) if response_payload else "",
            type="file_search_call",
            round=1,
        )

    # Incomplete OpenAI Responses API definition in LLS: the "action" attribute
    # is not supported yet, so args and content stay empty here.
    if item_type == "web_search_call":
        item = cast(OpenAIResponseOutputMessageWebSearchToolCall, output_item)
        return (
            ToolCallSummary(
                id=item.id,
                name="web_search",
                args={},
                type="web_search_call",
            ),
            ToolResultSummary(
                id=item.id,
                status=item.status,
                content="",
                type="web_search_call",
                round=1,
            ),
        )

    if item_type == "mcp_call":
        item = cast(OpenAIResponseOutputMessageMCPCall, output_item)
        args = parse_arguments_string(item.arguments)
        if item.server_label:
            args["server_label"] = item.server_label
        # Prefer the error text as result content; fall back to the tool output.
        content = item.error if item.error else (item.output if item.output else "")

        return ToolCallSummary(
            id=item.id,
            name=item.name,
            args=args,
            type="mcp_call",
        ), ToolResultSummary(
            id=item.id,
            # Truthiness check (not "is None") so an empty-string error counts as
            # success, consistent with the content fallback above.
            status="failure" if item.error else "success",
            content=content,
            type="mcp_call",
            round=1,
        )

    if item_type == "mcp_list_tools":
        item = cast(OpenAIResponseOutputMessageMCPListTools, output_item)
        # Full tool metadata goes into the result content; the call summary only
        # records which server was listed.
        tools_info = [
            {
                "name": tool.name,
                "description": tool.description,
                "input_schema": tool.input_schema,
            }
            for tool in item.tools
        ]
        content_dict = {
            "server_label": item.server_label,
            "tools": tools_info,
        }
        return (
            ToolCallSummary(
                id=item.id,
                name="mcp_list_tools",
                args={"server_label": item.server_label},
                type="mcp_list_tools",
            ),
            ToolResultSummary(
                id=item.id,
                status="success",
                content=json.dumps(content_dict),
                type="mcp_list_tools",
                round=1,
            ),
        )

    if item_type == "mcp_approval_request":
        item = cast(OpenAIResponseMCPApprovalRequest, output_item)
        args = parse_arguments_string(item.arguments)
        return (
            ToolCallSummary(
                id=item.id,
                name=item.name,
                args=args,
                # NOTE(review): type is "tool_call" here, unlike the other
                # branches which echo the item type — confirm this is intentional.
                type="tool_call",
            ),
            None,
        )

    if item_type == "mcp_approval_response":
        item = cast(OpenAIResponseMCPApprovalResponse, output_item)
        content_dict = {}
        if item.reason:
            content_dict["reason"] = item.reason
        return (
            None,  # approval responses carry no corresponding call summary
            ToolResultSummary(
                # Link the result back to the originating approval request.
                id=item.approval_request_id,
                status="success" if item.approve else "denied",
                content=json.dumps(content_dict),
                type="mcp_approval_response",
                round=1,
            ),
        )

    # Item types not listed above (e.g. plain "message") carry no tool info.
    return None, None
224 | 240 |
|
225 | 241 |
|
|
0 commit comments