Fix bug in embedding · hxy9243/llama-cpp-python@42bb721 · GitHub
[go: up one dir, main page]

Skip to content

Commit 42bb721

Browse files
committed
Fix bug in embedding
1 parent bca9653 commit 42bb721

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

llama_cpp/llama.py

Lines changed: 3 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -839,9 +839,10 @@ def create_embedding(
839839
An embedding object.
840840
"""
841841
assert self.ctx is not None
842+
assert self.model is not None
842843
model_name: str = model if model is not None else self.model_path
843844

844-
if self.model_params.embedding == False:
845+
if self.context_params.embedding == False:
845846
raise RuntimeError(
846847
"Llama model must be created with embedding=True to call this method"
847848
)
@@ -863,7 +864,7 @@ def create_embedding(
863864
n_tokens = len(tokens)
864865
total_tokens += n_tokens
865866
embedding = llama_cpp.llama_get_embeddings(self.ctx)[
866-
: llama_cpp.llama_n_embd(self.ctx)
867+
: llama_cpp.llama_n_embd(self.model)
867868
]
868869

869870
data.append(

0 commit comments

Comments (0)