@@ -386,6 +386,13 @@ def main():
386386 failed_tests_files = []
387387
388388 has_error = False
389+ if not is_targeted_check :
390+ session_timeout = 5400
391+ else :
392+ # For targeted jobs, use a shorter session timeout to keep feedback fast.
393+ # If this timeout is exceeded but all completed tests have passed, the
394+ # targeted check will not fail solely because the session timed out.
395+ session_timeout = 1200
389396 error_info = []
390397
391398 module_repeat_cnt = 1
@@ -398,7 +405,7 @@ def main():
398405 for attempt in range (module_repeat_cnt ):
399406 log_file = f"{ temp_path } /pytest_parallel.log"
400407 test_result_parallel = Result .from_pytest_run (
401- command = f"{ ' ' .join (parallel_test_modules )} --report-log-exclude-logs-on-passed-tests -n { workers } --dist=loadfile --tb=short { repeat_option } --session-timeout=5400 " ,
408+ command = f"{ ' ' .join (parallel_test_modules )} --report-log-exclude-logs-on-passed-tests -n { workers } --dist=loadfile --tb=short { repeat_option } --session-timeout={ session_timeout } " ,
402409 cwd = "./tests/integration/" ,
403410 env = test_env ,
404411 pytest_report_file = f"{ temp_path } /pytest_parallel.jsonl" ,
@@ -416,15 +423,20 @@ def main():
416423 if test_result_parallel .files :
417424 failed_tests_files .extend (test_result_parallel .files )
418425 if test_result_parallel .is_error ():
419- has_error = True
420- error_info .append (test_result_parallel .info )
426+ if not is_targeted_check :
427+ # In targeted checks we may overload the run with many or heavy tests
428+ # (--count N is used). In this mode, a session-timeout is an expected risk
429+ # rather than an infrastructure problem, so we do not treat such errors as job-level
430+ # failures and avoid setting the error flag for targeted runs.
431+ has_error = True
432+ error_info .append (test_result_parallel .info )
421433
422434 fail_num = len ([r for r in test_results if not r .is_ok ()])
423435 if sequential_test_modules and fail_num < MAX_FAILS_BEFORE_DROP and not has_error :
424436 for attempt in range (module_repeat_cnt ):
425437 log_file = f"{ temp_path } /pytest_sequential.log"
426438 test_result_sequential = Result .from_pytest_run (
427- command = f"{ ' ' .join (sequential_test_modules )} --report-log-exclude-logs-on-passed-tests --tb=short { repeat_option } -n 1 --dist=loadfile --session-timeout=5400 " ,
439+ command = f"{ ' ' .join (sequential_test_modules )} --report-log-exclude-logs-on-passed-tests --tb=short { repeat_option } -n 1 --dist=loadfile --session-timeout={ session_timeout } " ,
428440 env = test_env ,
429441 cwd = "./tests/integration/" ,
430442 pytest_report_file = f"{ temp_path } /pytest_sequential.jsonl" ,
@@ -442,8 +454,13 @@ def main():
442454 if test_result_sequential .files :
443455 failed_tests_files .extend (test_result_sequential .files )
444456 if test_result_sequential .is_error ():
445- has_error = True
446- error_info .append (test_result_sequential .info )
457+ if not is_targeted_check :
458+ # In targeted checks we may overload the run with many or heavy tests
459+ # (--count N is used). In this mode, a session-timeout is an expected risk
460+ # rather than an infrastructure problem, so we do not treat such errors as job-level
461+ # failures and avoid setting the error flag for targeted runs.
462+ has_error = True
463+ error_info .append (test_result_sequential .info )
447464
448465 # Collect logs before re-run
449466 attached_files = []