Potential bugfix for eval · furyhawk/llama-cpp-python@d9b38e3 · GitHub
Commit d9b38e3

Potential bugfix for eval
1 parent 52350cc commit d9b38e3

File tree

1 file changed: +2 −3 lines changed


llama_cpp/llama.py

Lines changed: 2 additions & 3 deletions
@@ -1019,12 +1019,11 @@ def eval(self, tokens: Sequence[int]):
         """
         assert self._ctx.ctx is not None
         assert self._batch.batch is not None
-        n_ctx = self._n_ctx
+        self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1)
         for i in range(0, len(tokens), self.n_batch):
             batch = tokens[i : min(len(tokens), i + self.n_batch)]
-            n_past = min(n_ctx - len(batch), self.n_tokens)
+            n_past = self.n_tokens
             n_tokens = len(batch)
-            self._ctx.kv_cache_seq_rm(-1, n_past, -1)
             self._batch.set_batch(
                 batch=batch, n_past=n_past, logits_all=self.context_params.logits_all
             )
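
In short, the commit hoists the KV-cache trim out of the batching loop, calling kv_cache_seq_rm once from the current token count before any batch is decoded, and it stops clamping n_past against the context window, using the running token count self.n_tokens directly instead. Below is a minimal, standalone sketch contrasting the two n_past computations; the helper names (n_past_before, n_past_after) are assumptions for illustration, not part of the library:

# Standalone sketch (assumed names) of how n_past is computed per batch
# before and after this commit.

def n_past_before(n_ctx: int, n_cached: int, batch_len: int) -> int:
    # Pre-commit behaviour: clamp so the batch fits inside the window.
    return min(n_ctx - batch_len, n_cached)


def n_past_after(n_cached: int) -> int:
    # Post-commit behaviour: continue from the tokens already cached.
    return n_cached


if __name__ == "__main__":
    n_ctx, n_batch = 512, 64
    tokens = list(range(520))  # a prompt that slightly overfills the window
    n_cached = 0
    for i in range(0, len(tokens), n_batch):
        batch = tokens[i : min(len(tokens), i + n_batch)]
        before = n_past_before(n_ctx, n_cached, len(batch))
        after = n_past_after(n_cached)
        marker = "  <-- diverges" if before != after else ""
        print(f"batch@{i:4d} len={len(batch):3d} "
              f"before={before:4d} after={after:4d}{marker}")
        n_cached += len(batch)

In the sketch, the last batch is where the two computations diverge: the old clamp pulls n_past below the number of tokens already cached, so the old in-loop kv_cache_seq_rm(-1, n_past, -1) would discard already-evaluated cache entries. That appears to be the "potential" bug the commit message refers to, though the commit itself does not spell this out.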
