Fix type annotations · cyberjon/llama-cpp-python@7df6c32 · GitHub

Commit 7df6c32

Fix type annotations
1 parent b703aad commit 7df6c32

File tree

1 file changed: 2 additions & 2 deletions


llama_cpp/llama_cpp.py

Lines changed: 2 additions & 2 deletions
@@ -1059,7 +1059,7 @@ def llama_kv_cache_view_init(
 
 # // Free a KV cache view. (use only for debugging purposes)
 # LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);
-def llama_kv_cache_view_free(view: "ctypes._Pointer[llama_kv_cache_view]"): # type: ignore
+def llama_kv_cache_view_free(view: "ctypes.pointer[llama_kv_cache_view]"): # type: ignore
     """Free a KV cache view. (use only for debugging purposes)"""
     return _lib.llama_kv_cache_view_free(view)
 
@@ -1070,7 +1070,7 @@ def llama_kv_cache_view_free(view: "ctypes._Pointer[llama_kv_cache_view]"): # type: ignore
 
 # // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
 # LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
-def llama_kv_cache_view_update(ctx: llama_context_p, view: "ctypes._Pointer[llama_kv_cache_view]"): # type: ignore
+def llama_kv_cache_view_update(ctx: llama_context_p, view: "ctypes.pointer[llama_kv_cache_view]"): # type: ignore
     """Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)"""
     return _lib.llama_kv_cache_view_update(ctx, view)
 
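For context, here is a minimal, self-contained ctypes sketch of what the annotated parameter is at runtime. It is not code from this repository: llama_kv_cache_view is replaced by a hypothetical stand-in Structure whose fields differ from the real one. It illustrates that ctypes.pointer() is the factory callers use to build the pointer argument, while the objects it returns are instances of the private ctypes._Pointer base class named in the old annotation; both annotations are string literals, so neither spelling is evaluated at import time.

import ctypes

# Hypothetical stand-in for the llama_kv_cache_view Structure defined in llama_cpp.py;
# the real struct has different fields.
class llama_kv_cache_view(ctypes.Structure):
    _fields_ = [("n_cells", ctypes.c_int32)]

view = llama_kv_cache_view(n_cells=0)

# ctypes.pointer() builds a pointer object at runtime ...
view_p = ctypes.pointer(view)

# ... and that object is an instance of the private ctypes._Pointer base class,
# which is what the previous annotation spelled out.
assert isinstance(view_p, ctypes._Pointer)

# Dereferencing recovers the structure, which is what the wrapped C functions receive.
assert view_p.contents.n_cells == 0

A pointer built this way is what would be handed to the two wrappers above and forwarded to _lib.llama_kv_cache_view_free and _lib.llama_kv_cache_view_update.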

0 commit comments

Comments
 (0)
0