
Commit 00bb6f3

Add request number and logging (do not use print); fix typos.
Add a test for LLM completion; add `@pytest.mark.asyncio` decorators.

1 parent: 801c9ad

4 files changed: 124 additions & 6 deletions


IPython/terminal/shortcuts/auto_suggest.py

Lines changed: 11 additions & 6 deletions
```diff
@@ -185,6 +185,7 @@ def __init__(self):
         self.skip_lines = 0
         self._connected_apps = []
         self._llm_provider = None
+        self._request_number = 0

     def reset_history_position(self, _: Buffer) -> None:
         self.skip_lines = 0
```
```diff
@@ -350,7 +351,7 @@ async def error_catcher(buffer):
            try:
                await self._trigger_llm_core(buffer)
            except Exception as e:
-                get_ipython().log.error("error")
+                get_ipython().log.error(f"error {e}")
                raise

        # here we need a cancellable task, so we can't just await the error catcher
```
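
The trailing comment is the crux of the control flow: the error catcher must run as a task so that a newer keystroke can cancel an in-flight completion rather than waiting for it. A minimal, hypothetical sketch of that pattern (names are illustrative, not from the commit):

```python
import asyncio

async def error_catcher(tag):
    await asyncio.sleep(0.1)  # stand-in for the LLM round trip
    print(f"completed {tag}")

async def main():
    # Wrapping the coroutine in a Task keeps it cancellable; a bare
    # `await error_catcher(...)` would block until the LLM replied.
    task = asyncio.create_task(error_catcher("first"))
    task.cancel()  # a newer request supersedes the in-flight one
    try:
        await task
    except asyncio.CancelledError:
        print("first request cancelled")
    await error_catcher("second")

asyncio.run(main())
```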
```diff
@@ -365,9 +366,8 @@ async def _trigger_llm_core(self, buffer: Buffer):
         provider to stream its response back to us, iteratively setting it as
         the suggestion on the current buffer.

-        Unlike with JupyterAi, as we do not have multiple cell, the cell number
-        is always set to `0`, note that we _could_ set it to a new number each
-        time and ignore threply from past numbers.
+        Unlike with JupyterAI, as we do not have multiple cells, the cell id
+        is always set to `None`.

         We set the prefix to the current cell content, but could also insert the
         rest of the history, or even just the non-failing history.
```
```diff
@@ -389,10 +389,12 @@ async def _trigger_llm_core(self, buffer: Buffer):

         hm = buffer.history.shell.history_manager
         prefix = self._llm_prefixer(hm)
-        print(prefix)
+        get_ipython().log.debug(f"prefix: {prefix}")

+        self._request_number += 1
+        request_number = self._request_number
         request = jai_models.InlineCompletionRequest(
-            number=0,
+            number=request_number,
             prefix=prefix + buffer.document.text,
             suffix="",
             mime="text/x-python",
```
```diff
@@ -405,6 +407,9 @@ async def _trigger_llm_core(self, buffer: Buffer):
         async for reply_and_chunks in self._llm_provider.stream_inline_completions(
             request
         ):
+            if self._request_number != request_number:
+                # If a new suggestion was requested, skip processing this one.
+                return
             if isinstance(reply_and_chunks, jai_models.InlineCompletionReply):
                 if len(reply_and_chunks.list.items) > 1:
                     raise ValueError(
```
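
The guard added in this last hunk pairs with the `_request_number` counter initialized in the first hunk: each trigger snapshots a monotonically increasing counter, and the streaming consumer drops out as soon as the shared counter has moved past its snapshot. A self-contained sketch of the same last-writer-wins pattern, with hypothetical names:

```python
import asyncio

class Completer:
    """Minimal sketch of the request-number guard; not IPython's actual class."""

    def __init__(self):
        self._request_number = 0

    async def request(self, text):
        self._request_number += 1
        request_number = self._request_number  # snapshot for this stream
        async for chunk in self._stream(text):
            if self._request_number != request_number:
                # A newer request started; silently drop this stale stream.
                return
            print(f"#{request_number}: {chunk}")

    async def _stream(self, text):
        for word in text.split():
            await asyncio.sleep(0.01)
            yield word

async def main():
    completer = Completer()
    stale = asyncio.create_task(completer.request("old suggestion stream"))
    await asyncio.sleep(0.015)  # let the first stream emit one chunk
    await completer.request("new suggestion stream")  # supersedes the first
    await stale  # returns early without finishing

asyncio.run(main())
```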

pyproject.toml

Lines changed: 1 addition & 0 deletions
```diff
@@ -77,6 +77,7 @@ test = [
 test_extra = [
     "ipython[test]",
     "curio",
+    "jupyter_ai",
     "matplotlib!=3.2.0",
     "nbformat",
     "numpy>=1.23",
```

tests/fake_llm.py

Lines changed: 88 additions & 0 deletions
```python
import asyncio

try:
    from jupyter_ai_magics import BaseProvider
    from langchain_community.llms import FakeListLLM
except ImportError:
    # Stubs so this module still imports when jupyter_ai is not installed;
    # the test that uses it is skipped in that case.

    class BaseProvider:
        pass

    class FakeListLLM:
        pass


FIBONACCI = """\
def fib(n):
    if n < 2: return n
    return fib(n - 1) + fib(n - 2)
"""


class FibonacciCompletionProvider(BaseProvider, FakeListLLM):  # type: ignore[misc, valid-type]

    id = "my_provider"
    name = "My Provider"
    model_id_key = "model"
    models = ["model_a"]

    def __init__(self, **kwargs):
        kwargs["responses"] = ["This fake response will not be used for completion"]
        kwargs["model_id"] = "model_a"
        super().__init__(**kwargs)

    async def generate_inline_completions(self, request):
        raise ValueError("IPython only supports streaming models.")

    async def stream_inline_completions(self, request):
        from jupyter_ai.completions.models import (
            InlineCompletionList,
            InlineCompletionReply,
        )

        assert request.number > 0
        token = f"t{request.number}s0"
        last_line = request.prefix.rstrip("\n").splitlines()[-1]

        # Only complete when the current line is a prefix of the target snippet.
        if not FIBONACCI.startswith(last_line):
            return

        # First, an empty reply marked incomplete, so the client knows a
        # stream with this token will follow.
        yield InlineCompletionReply(
            list=InlineCompletionList(
                items=[
                    {"insertText": "", "isIncomplete": True, "token": token},
                ]
            ),
            reply_to=request.number,
        )

        async for reply in self._stream(
            FIBONACCI[len(last_line) :],
            request.number,
            token,
        ):
            yield reply

    async def _stream(self, sentence, request_number, token, start_with=""):
        # Imported here as well: the import in stream_inline_completions is
        # local to that function and would not be visible in this scope.
        from jupyter_ai.completions.models import InlineCompletionStreamChunk

        suggestion = start_with

        for fragment in sentence.split(" "):
            await asyncio.sleep(0.05)
            if suggestion:
                suggestion += " "
            suggestion += fragment
            yield InlineCompletionStreamChunk(
                type="stream",
                response={"insertText": suggestion, "token": token},
                reply_to=request_number,
                done=False,
            )

        # finally, send a message confirming that we are done
        yield InlineCompletionStreamChunk(
            type="stream",
            response={"insertText": suggestion, "token": token},
            reply_to=request_number,
            done=True,
        )
```
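
For orientation, a hedged driver for the fake provider (not part of the commit): it builds a request by hand and prints each streamed reply. The field names mirror the `InlineCompletionRequest` call in the auto_suggest.py hunk above; `stream=True` and the import spellings are assumptions about the jupyter_ai API.

```python
import asyncio

from fake_llm import FibonacciCompletionProvider  # assumes tests/ is importable
from jupyter_ai.completions import models as jai_models  # assumed import path

async def main():
    provider = FibonacciCompletionProvider()
    request = jai_models.InlineCompletionRequest(
        number=1,
        prefix="def fib",
        suffix="",
        mime="text/x-python",
        stream=True,  # assumption: the model requires a stream flag
    )
    async for reply in provider.stream_inline_completions(request):
        print(type(reply).__name__, getattr(reply, "done", "-"))

asyncio.run(main())
```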

tests/test_shortcuts.py

Lines changed: 24 additions & 0 deletions
```diff
@@ -7,12 +7,14 @@
     accept_word,
     accept_and_keep_cursor,
     discard,
+    llm_autosuggestion,
     NavigableAutoSuggestFromHistory,
     swap_autosuggestion_up,
     swap_autosuggestion_down,
 )
 from IPython.terminal.shortcuts.auto_match import skip_over
 from IPython.terminal.shortcuts import create_ipython_shortcuts, reset_search_buffer
+from IPython.testing import decorators as dec

 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.buffer import Buffer
```
```diff
@@ -34,6 +36,26 @@ def make_event(text, cursor, suggestion):
     return event


+try:
+    from .fake_llm import FIBONACCI
+except ImportError:
+    FIBONACCI = None
+
+
+@dec.skip_without("jupyter_ai")
+@pytest.mark.asyncio
+async def test_llm_autosuggestion():
+    provider = NavigableAutoSuggestFromHistory()
+    ip = get_ipython()
+    ip.auto_suggest = provider
+    ip.llm_provider_class = "tests.fake_llm.FibonacciCompletionProvider"
+    text = "def fib"
+    event = make_event(text, len(text), "")
+    event.current_buffer.history.shell.history_manager.get_range = Mock(return_value=[])
+    await llm_autosuggestion(event)
+    assert event.current_buffer.suggestion.text == FIBONACCI[len(text) :]
+
+
 @pytest.mark.parametrize(
     "text, suggestion, expected",
     [
```
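
Note that the test assigns `ip.llm_provider_class` a dotted string rather than a class object. The resolution code is outside this diff; the usual importlib pattern looks roughly like this (hypothetical helper, not IPython's actual implementation):

```python
import importlib

def resolve_dotted(path):
    # e.g. "tests.fake_llm.FibonacciCompletionProvider" -> the class object
    module_name, _, attr = path.rpartition(".")
    return getattr(importlib.import_module(module_name), attr)
```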
```diff
@@ -219,6 +241,7 @@ def test_other_providers():
     assert swap_autosuggestion_down(event) is None


+@pytest.mark.asyncio
 async def test_navigable_provider():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a", "very", "very_b", "very_c"])
```
```diff
@@ -271,6 +294,7 @@ def get_suggestion():
     assert get_suggestion().text == "_a"


+@pytest.mark.asyncio
 async def test_navigable_provider_multiline_entries():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a\nvery_b", "very_c"])
```
