-
Notifications
You must be signed in to change notification settings - Fork 5.4k
disable openai agents tracing by default #18562
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Closed
Closed
Changes from all commits
Commits
Show all changes
3 commits
Select commit
Hold shift + click to select a range
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -11,7 +11,6 @@ | |
| pytest.skip("OpenAI SDK is not installed. Skipping tests.", allow_module_level=True) | ||
|
|
||
| from agents import Agent, Runner, function_tool, set_default_openai_client, trace | ||
| from agents.tracing import set_trace_processors | ||
| from openai.types.responses.function_tool import FunctionTool | ||
| from openai.types.responses.response import Response | ||
| from openai.types.responses.response_output_item import ( | ||
|
|
@@ -40,12 +39,6 @@ | |
| set_default_openai_client(async_client) | ||
|
|
||
|
|
||
| @pytest.fixture(autouse=True) | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. deleting since this will be the case by default now -- tests should continue passing |
||
| def disable_default_tracing(): | ||
| # Disable default OpenAI tracer | ||
| set_trace_processors([]) | ||
|
|
||
|
|
||
| @pytest.mark.asyncio | ||
| async def test_autolog_agent(): | ||
| mlflow.openai.autolog() | ||
|
|
@@ -126,6 +119,8 @@ | |
|
|
||
| assert response.final_output == "¡Hola! Estoy bien, gracias. ¿Y tú, cómo estás?" | ||
| traces = get_traces() | ||
| print([t.to_dict() for t in traces]) | ||
| assert False | ||
| assert len(traces) == 1 | ||
| trace = traces[0] | ||
| assert trace.info.status == "OK" | ||
|
|
@@ -139,7 +134,7 @@ | |
| assert spans[0].outputs == response.final_output | ||
| assert spans[1].name == "Triage Agent" | ||
| assert spans[1].parent_id == spans[0].span_id | ||
| assert spans[2].name == "Response_1" | ||
| assert spans[2].name == "Response" | ||
| assert spans[2].parent_id == spans[1].span_id | ||
| assert spans[2].inputs == [{"role": "user", "content": "Hola. ¿Como estás?"}] | ||
| assert spans[2].outputs == [ | ||
|
|
@@ -158,7 +153,7 @@ | |
| assert spans[3].parent_id == spans[1].span_id | ||
| assert spans[4].name == "Spanish Agent" | ||
| assert spans[4].parent_id == spans[0].span_id | ||
| assert spans[5].name == "Response_2" | ||
| assert spans[5].name == "Response" | ||
| assert spans[5].parent_id == spans[4].span_id | ||
|
|
||
| # Validate chat attributes | ||
|
|
@@ -389,3 +384,181 @@ | |
| await Runner.run(agent, messages) | ||
|
|
||
| assert get_traces() == [] | ||
|
|
||
|
|
||
| @pytest.mark.asyncio | ||
| async def test_autolog_agent_with_enabled_openai_agent_tracer(): | ||
| import logging | ||
|
|
||
| # Set up logging capture for the openai.agents logger | ||
| class LogCapture(logging.Handler): | ||
| def __init__(self): | ||
| super().__init__() | ||
| self.records = [] | ||
|
|
||
| def emit(self, record): | ||
| self.records.append(record) | ||
|
|
||
| log_capture = LogCapture() | ||
| openai_agents_logger = logging.getLogger("openai.agents") | ||
| openai_agents_logger.addHandler(log_capture) | ||
| openai_agents_logger.setLevel(logging.DEBUG) | ||
|
|
||
| mlflow.openai.autolog(disable_openai_agent_tracer=False) | ||
|
|
||
| # NB: We have to mock the OpenAI SDK responses to make agent works | ||
| DUMMY_RESPONSES = [ | ||
| Response( | ||
| id="123", | ||
| created_at=12345678.0, | ||
| error=None, | ||
| model="gpt-4o-mini", | ||
| object="response", | ||
| instructions="Handoff to the appropriate agent based on the language of the request.", | ||
| output=[ | ||
| ResponseFunctionToolCall( | ||
| id="123", | ||
| arguments="{}", | ||
| call_id="123", | ||
| name="transfer_to_spanish_agent", | ||
| type="function_call", | ||
| status="completed", | ||
| ) | ||
| ], | ||
| tools=[ | ||
| FunctionTool( | ||
| name="transfer_to_spanish_agent", | ||
| parameters={"type": "object", "properties": {}, "required": []}, | ||
| type="function", | ||
| description="Handoff to the Spanish_Agent agent to handle the request.", | ||
| strict=False, | ||
| ), | ||
| ], | ||
| tool_choice="auto", | ||
| temperature=1, | ||
| parallel_tool_calls=True, | ||
| ), | ||
| Response( | ||
| id="123", | ||
| created_at=12345678.0, | ||
| error=None, | ||
| model="gpt-4o-mini", | ||
| object="response", | ||
| instructions="You only speak Spanish", | ||
| output=[ | ||
| ResponseOutputMessage( | ||
| id="123", | ||
| content=[ | ||
| ResponseOutputText( | ||
| annotations=[], | ||
| text="¡Hola! Estoy bien, gracias. ¿Y tú, cómo estás?", | ||
| type="output_text", | ||
| ) | ||
| ], | ||
| role="assistant", | ||
| status="completed", | ||
| type="message", | ||
| ) | ||
| ], | ||
| tools=[], | ||
| tool_choice="auto", | ||
| temperature=1, | ||
| parallel_tool_calls=True, | ||
| ), | ||
| ] | ||
|
|
||
| set_dummy_client(DUMMY_RESPONSES) | ||
|
|
||
| english_agent = Agent(name="English Agent", instructions="You only speak English") | ||
| spanish_agent = Agent(name="Spanish Agent", instructions="You only speak Spanish") | ||
| triage_agent = Agent( | ||
| name="Triage Agent", | ||
| instructions="Handoff to the appropriate agent based on the language of the request.", | ||
| handoffs=[spanish_agent, english_agent], | ||
| ) | ||
|
|
||
| messages = [{"role": "user", "content": "Hola. ¿Como estás?"}] | ||
| response = await Runner.run(starting_agent=triage_agent, input=messages) | ||
|
|
||
| assert response.final_output == "¡Hola! Estoy bien, gracias. ¿Y tú, cómo estás?" | ||
| traces = get_traces() | ||
| print([t.to_dict() for t in traces]) | ||
| # assert False | ||
| assert len(traces) == 1 | ||
| trace = traces[0] | ||
| assert trace.info.status == "OK" | ||
| assert json.loads(trace.info.request_preview) == messages | ||
| assert json.loads(trace.info.response_preview) == response.final_output | ||
| spans = trace.data.spans | ||
| assert len(spans) == 6 # 1 root + 2 agent + 1 handoff + 2 response | ||
| assert spans[0].name == "AgentRunner.run" | ||
| assert spans[0].span_type == SpanType.AGENT | ||
| assert spans[0].inputs == messages | ||
| assert spans[0].outputs == response.final_output | ||
| assert spans[1].name == "Triage Agent" | ||
| assert spans[1].parent_id == spans[0].span_id | ||
| assert spans[2].name == "Response" | ||
| assert spans[2].parent_id == spans[1].span_id | ||
| assert spans[2].inputs == [{"role": "user", "content": "Hola. ¿Como estás?"}] | ||
| assert spans[2].outputs == [ | ||
| { | ||
| "id": "123", | ||
| "arguments": "{}", | ||
| "call_id": "123", | ||
| "name": "transfer_to_spanish_agent", | ||
| "type": "function_call", | ||
| "status": "completed", | ||
| } | ||
| ] | ||
| assert spans[2].attributes["temperature"] == 1 | ||
| assert spans[3].name == "Handoff" | ||
| assert spans[3].span_type == SpanType.CHAIN | ||
| assert spans[3].parent_id == spans[1].span_id | ||
| assert spans[4].name == "Spanish Agent" | ||
| assert spans[4].parent_id == spans[0].span_id | ||
| assert spans[5].name == "Response" | ||
| assert spans[5].parent_id == spans[4].span_id | ||
|
|
||
| # Validate chat attributes | ||
| assert spans[2].attributes[SpanAttributeKey.CHAT_TOOLS] == [ | ||
| { | ||
| "function": { | ||
| "description": "Handoff to the Spanish_Agent agent to handle the request.", | ||
| "name": "transfer_to_spanish_agent", | ||
| "parameters": { | ||
| "additionalProperties": None, | ||
| "properties": {}, | ||
| "required": [], | ||
| "type": "object", | ||
| }, | ||
| "strict": False, | ||
| }, | ||
| "type": "function", | ||
| }, | ||
| ] | ||
| assert SpanAttributeKey.CHAT_TOOLS not in spans[5].attributes | ||
|
|
||
| # Validate that the non-fatal API key error was logged | ||
| import time | ||
|
|
||
| time.sleep(5.0) # Give background thread time to log the error | ||
|
|
||
| # Check captured logs from openai.agents logger | ||
| captured_messages = [record.getMessage() for record in log_capture.records] | ||
| api_key_errors = [msg for msg in captured_messages if "Incorrect API key provided" in msg] | ||
|
|
||
| # Print debug information | ||
| print(f"DEBUG: Captured {len(captured_messages)} log messages") | ||
| print(f"DEBUG: API key errors found: {len(api_key_errors)}") | ||
| if captured_messages: | ||
| print(f"DEBUG: Sample captured messages: {captured_messages[:5]}") | ||
|
|
||
| # Clean up first before assertions to avoid interference | ||
| openai_agents_logger.removeHandler(log_capture) | ||
|
|
||
| error_msg = api_key_errors[0] | ||
| print(f"SUCCESS: Captured expected API key error: {error_msg}") | ||
| assert "401" in error_msg | ||
| assert "Incorrect API key provided: test" in error_msg | ||
| assert "invalid_api_key" in error_msg | ||
| assert "[non-fatal]" in error_msg | ||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
"OpenAI Agent SDK tracer" might be mistaken for our tracer if users don't know about the native tracer functionality.