@@ -171,16 +171,19 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
171171 # another request.
172172 _llm_task : asyncio .Task | None = None
173173
174- # This is the instance of the LLM provider from jupyter-ai to which we forward the request
175- # to generate inline completions.
176- _llm_provider : Any | None
174+ # This is the constructor of the LLM provider from jupyter-ai
175+ # to which we forward the request to generate inline completions.
176+ _init_llm_provider : Callable | None
177+
178+ _llm_provider_instance : Any | None
177179 _llm_prefixer : Callable = lambda self , x : "wrong"
178180
179181 def __init__ (self ):
180182 super ().__init__ ()
181183 self .skip_lines = 0
182184 self ._connected_apps = []
183- self ._llm_provider = None
185+ self ._llm_provider_instance = None
186+ self ._init_llm_provider = None
184187 self ._request_number = 0
185188
186189 def reset_history_position (self , _ : Buffer ):
@@ -317,6 +320,16 @@ def _cancel_running_llm_task(self) -> None:
317320 "LLM task not cancelled, does your provider support cancellation?"
318321 )
319322
323+ @property
324+ def _llm_provider (self ):
325+ """Lazy-initialized instance of the LLM provider.
326+
327+ Do not use in the constructor, as `_init_llm_provider` can trigger slow side-effects.
328+ """
329+ if self ._llm_provider_instance is None and self ._init_llm_provider :
330+ self ._llm_provider_instance = self ._init_llm_provider ()
331+ return self ._llm_provider_instance
332+
320333 async def _trigger_llm (self , buffer ) -> None :
321334 """
322335 This will ask the current llm provider for a suggestion for the current buffer.
@@ -325,14 +338,14 @@ async def _trigger_llm(self, buffer) -> None:
325338 """
326339 # we likely want to store the current cursor position, and cancel if the cursor has moved.
327340 try :
328- import jupyter_ai . completions . models as jai_models
341+ import jupyter_ai_magics
329342 except ModuleNotFoundError :
330- jai_models = None
343+ jupyter_ai_magics = None
331344 if not self ._llm_provider :
332345 warnings .warn ("No LLM provider found, cannot trigger LLM completions" )
333346 return
334- if jai_models is None :
335- warnings .warn ("LLM Completion requires `jupyter_ai ` to be installed" )
347+ if jupyter_ai_magics is None :
348+ warnings .warn ("LLM Completion requires `jupyter_ai_magics ` to be installed" )
336349
337350 self ._cancel_running_llm_task ()
338351
@@ -359,7 +372,7 @@ async def _trigger_llm_core(self, buffer: Buffer):
359372 provider to stream its response back to us iteratively, setting it as
360373 the suggestion on the current buffer.
361374
362- Unlike with JupyterAi, as we do not have multiple cell , the cell id
375+ Unlike with JupyterAi, as we do not have multiple cells , the cell id
363376 is always set to `None`.
364377
365378 We set the prefix to the current cell content, but could also insert the
0 commit comments