Fix logits_all bug · fieri/llama-cpp-python@d696251 · GitHub

Commit d696251

Fix logits_all bug

1 parent 6ee413d · commit d696251

File tree

1 file changed: +2 −2 lines changed

llama_cpp/llama.py

Lines changed: 2 additions & 2 deletions
@@ -439,7 +439,7 @@ def eval_tokens(self) -> Deque[int]:
     def eval_logits(self) -> Deque[List[float]]:
         return deque(
            self.scores[: self.n_tokens, :].tolist(),
-           maxlen=self._n_ctx if self.model_params.logits_all else 1,
+           maxlen=self._n_ctx if self.context_params.logits_all else 1,
        )
 
     def tokenize(self, text: bytes, add_bos: bool = True) -> List[int]:
@@ -964,7 +964,7 @@ def _create_completion(
         else:
             stop_sequences = []
 
-        if logprobs is not None and self.model_params.logits_all is False:
+        if logprobs is not None and self.context_params.logits_all is False:
             raise ValueError(
                 "logprobs is not supported for models created with logits_all=False"
             )
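For context, a minimal usage sketch of what this fix relates to (not part of the commit; the model path below is hypothetical): in llama-cpp-python, logits_all is a context parameter, and passing logits_all=True when constructing Llama is what permits logprobs to be requested from create_completion without hitting the ValueError checked above.

from llama_cpp import Llama

# logits_all=True asks the context to keep logits for every evaluated token,
# which is the precondition checked in _create_completion above.
llm = Llama(model_path="./models/7B/ggml-model.bin", logits_all=True)

# With logits_all enabled, per-token logprobs can be requested.
output = llm.create_completion("Q: Name the planets. A:", max_tokens=16, logprobs=5)
print(output["choices"][0]["logprobs"]["top_logprobs"][0])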

0 commit comments