Update to more sensible return signature · coderonion/llama-cpp-python@3dec778 · GitHub

Commit 3dec778

Update to more sensible return signature
1 parent f7ab8d5 commit 3dec778

File tree

1 file changed: +4 -7 lines changed

llama_cpp/llama.py

Lines changed: 4 additions & 7 deletions
@@ -2,7 +2,7 @@
 import uuid
 import time
 import multiprocessing
-from typing import List, Optional, Union, Generator, Sequence
+from typing import List, Optional, Union, Generator, Sequence, Iterator
 from collections import deque
 
 from . import llama_cpp
@@ -286,10 +286,7 @@ def _create_completion(
         repeat_penalty: float = 1.1,
         top_k: int = 40,
         stream: bool = False,
-    ) -> Union[
-        Generator[Completion, None, None],
-        Generator[CompletionChunk, None, None],
-    ]:
+    ) -> Union[Iterator[Completion], Iterator[CompletionChunk],]:
         assert self.ctx is not None
         completion_id = f"cmpl-{str(uuid.uuid4())}"
         created = int(time.time())
@@ -428,7 +425,7 @@ def create_completion(
         repeat_penalty: float = 1.1,
         top_k: int = 40,
         stream: bool = False,
-    ) -> Union[Completion, Generator[CompletionChunk, None, None]]:
+    ) -> Union[Completion, Iterator[CompletionChunk]]:
         """Generate text from a prompt.
 
         Args:
@@ -465,7 +462,7 @@ def create_completion(
             stream=stream,
         )
         if stream:
-            chunks: Generator[CompletionChunk, None, None] = completion_or_chunks
+            chunks: Iterator[CompletionChunk] = completion_or_chunks
             return chunks
         completion: Completion = next(completion_or_chunks) # type: ignore
         return completion
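
The change relies on a standard typing equivalence: a Generator[Y, None, None] neither accepts send() values nor returns anything, so it is just an Iterator[Y], and the shorter annotation states the same contract with less noise. A minimal sketch of what the updated signature means for callers follows; the model path and prompt are placeholders, and the "choices"/"text" keys follow the library's Completion and CompletionChunk typed dicts:

from llama_cpp import Llama

llm = Llama(model_path="./models/ggml-model.bin")  # placeholder model path

# stream=False (the default): the call returns one Completion dict.
completion = llm.create_completion("Q: Name the planets. A:", max_tokens=48)
print(completion["choices"][0]["text"])

# stream=True: the call returns an Iterator[CompletionChunk] that yields
# partial text as tokens are generated.
for chunk in llm.create_completion(
    "Q: Name the planets. A:", max_tokens=48, stream=True
):
    print(chunk["choices"][0]["text"], end="", flush=True)

Note that the non-streaming path collapses the internal generator with a single next() call and hands back a plain Completion dict rather than an iterator, which is exactly what the Union[Completion, Iterator[CompletionChunk]] annotation captures.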
