-
Notifications
You must be signed in to change notification settings - Fork 275
[panic] too many cycle iterations in 0.0.24 (regression from 0.0.23) #3104
Copy link
Copy link
Closed
astral-sh/ruff
#24006
Labels
bug — Something isn't working; fatal — a fatal error (panic or crash)
Milestone
Description
Summary
ty 0.0.24 panics with infer_expression_type_impl: execute: too many cycle iterations on code that passes cleanly on 0.0.23. The trigger appears to be nested while loops where a method containing concurrent.futures iteration is called from both a main loop and a polling-sleep helper.
Minimal reproducer:
import concurrent.futures
import time
from datetime import datetime, timedelta
class QueueItem:
    """A minimal queue record pairing a row id with an associated bug id."""

    def __init__(self, id: int, nvbug_id: int) -> None:
        # `id` shadows the builtin, but the parameter name is part of this
        # reproducer's public interface, so it is kept as-is.
        self.id, self.nvbug_id = id, nvbug_id
class Repository:
    """Stub data source: claims nothing and reports zero pending items."""

    def claim_next_items(self, limit: int = 1) -> list[QueueItem]:
        """Return the next batch of claimed items — always empty in this stub."""
        batch: list[QueueItem] = []
        return batch

    def count_pending(self, threshold: int) -> int:
        """Return how many items are pending — always zero in this stub."""
        return 0
class Worker:
    """Reproducer class for the ty `too many cycle iterations` panic.

    Per the report, the trigger is that `_process_queue` (nested while/for
    plus `concurrent.futures` iteration) is reachable from BOTH `run_loop`
    and the polling helper `_sleep_with_poll` — the statement structure
    below should therefore not be reorganized.
    """

    def __init__(self) -> None:
        self.cache = Repository()
        self.max_workers = 4
        self._running = False
        # Bookkeeping about the batch currently being processed.
        self._current_ids: list[int] = []
        self._current_started_at: datetime | None = None
        self._analyzed: int = 0
        self._failed: int = 0

    def _process_item(self, item: QueueItem) -> bool:
        # Stub: pretend every item processes successfully.
        return True

    def _process_queue(self) -> dict[str, int]:
        """Drain the queue while running; return analyzed/failed counts."""
        analyzed = 0
        failed = 0
        while self._running:
            items = self.cache.claim_next_items(limit=self.max_workers)
            if not items:
                # Nothing left to claim — clear batch state and stop.
                self._current_ids = []
                self._current_started_at = None
                break
            self._current_ids = [item.nvbug_id for item in items]
            self._current_started_at = datetime.now()
            if self.max_workers <= 1 or len(items) == 1:
                # Sequential path for a single worker or single item.
                for item in items:
                    if not self._running:
                        break
                    if self._process_item(item):
                        analyzed += 1
                    else:
                        failed += 1
            else:
                # Concurrent path — the as_completed iteration nested inside
                # the while loop is part of the reported panic trigger.
                with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as pool:
                    futures = {pool.submit(self._process_item, item): item for item in items}
                    for future in concurrent.futures.as_completed(futures):
                        if future.result():
                            analyzed += 1
                        else:
                            failed += 1
        # NOTE(review): indentation was lost in extraction; these lines are
        # placed after the while loop, the most plausible original layout.
        self._analyzed = analyzed
        self._failed = failed
        self._current_ids = []
        self._current_started_at = None
        return {"analyzed": analyzed, "failed": failed}

    def _interruptible_sleep(self, seconds: float) -> None:
        """Sleep up to *seconds*, waking every `interval` to re-check _running."""
        interval = 1.0
        elapsed = 0.0
        while self._running and elapsed < seconds:
            time.sleep(min(interval, seconds - elapsed))
            elapsed += interval

    def _sleep_with_poll(self, total_seconds: float) -> None:
        """Sleep in chunks, polling the cache between chunks.

        Calls `_process_queue` from a second call site — the other half of
        the reported panic trigger.
        """
        elapsed = 0.0
        poll_interval = 60.0
        while self._running and elapsed < total_seconds:
            chunk = min(poll_interval, total_seconds - elapsed)
            self._interruptible_sleep(chunk)
            elapsed += chunk
            if not self._running or elapsed >= total_seconds:
                break
            pending = self.cache.count_pending(1)
            if pending > 0:
                self._process_queue()

    def run_loop(self) -> None:
        """Main loop: process the queue, then sleep-with-poll between passes."""
        self._running = True
        interval = timedelta(minutes=15)
        while self._running:
            self._process_queue()
            if not self._running:
                break
self._sleep_with_poll(interval.total_seconds())

$ ty check repro.py
error[panic]: Panicked at .../salsa-.../src/function/execute.rs:633:9
when checking `repro.py`:
`infer_expression_type_impl(Id(1825)): execute: too many cycle iterations`
The stack trace goes through loop_header_reachability → infer_expression_type_impl, suggesting the cycle is triggered by reachability analysis across the nested while-loop structure.
No pyproject.toml settings needed — default ty check reproduces it.
Note: #256 and #1098 are closed, but this appears to be a remaining variant through a different code path (loop_header_reachability).
Version
ty 0.0.24 (8762330 2026-03-19)
Passes on: ty 0.0.23
Reactions are currently unavailable
Metadata
Metadata
Assignees
Labels
bug — Something isn't working; fatal — a fatal error (panic or crash)