Add type annotations · coderonion/llama-cpp-python@3cd67c7 · GitHub
[go: up one dir, main page]

Skip to content

Commit 3cd67c7

Browse files
committed
Add type annotations
1 parent d7de0e8 commit 3cd67c7

File tree

1 file changed

+7
-5
lines changed

1 file changed

+7
-5
lines changed

llama_cpp/llama.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -332,13 +332,15 @@ def _create_completion(
332332
stream: bool = False,
333333
) -> Union[Iterator[Completion], Iterator[CompletionChunk]]:
334334
assert self.ctx is not None
335-
completion_id = f"cmpl-{str(uuid.uuid4())}"
336-
created = int(time.time())
335+
completion_id: str = f"cmpl-{str(uuid.uuid4())}"
336+
created: int = int(time.time())
337337
completion_tokens: List[llama_cpp.llama_token] = []
338338
# Add blank space to start of prompt to match OG llama tokenizer
339-
prompt_tokens = self.tokenize(b" " + prompt.encode("utf-8"))
340-
text = b""
341-
returned_characters = 0
339+
prompt_tokens: List[llama_cpp.llama_token] = self.tokenize(
340+
b" " + prompt.encode("utf-8")
341+
)
342+
text: bytes = b""
343+
returned_characters: int = 0
342344
stop = stop if stop is not None else []
343345

344346
if self.verbose:

0 commit comments

Comments (0)