Commit 9f528f4 (merge of 2 parents: 60426b2 + ff9faaa)
llama_cpp/llama.py
@@ -814,7 +814,7 @@ def _create_completion(
         llama_cpp.llama_reset_timings(self.ctx)

         if len(prompt_tokens) > self._n_ctx:
-            raise ValueError(f"Requested tokens exceed context window of {self._n_ctx}")
+            raise ValueError(f"Requested tokens ({len(prompt_tokens)}) exceed context window of {self._n_ctx}")

         # Truncate max_tokens if requested tokens would exceed the context window
         max_tokens = (
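
The change only touches the error message: it now reports how many tokens the prompt actually produced, which makes the failure easier to diagnose from calling code. Below is a minimal sketch of how the error surfaces through the high-level API; the model path and the small n_ctx value are placeholder assumptions chosen only to trigger the check, and the token count in the comment is illustrative.

from llama_cpp import Llama

# Hypothetical model path; a deliberately small context window to trigger the check.
llm = Llama(model_path="./models/ggml-model-q4_0.bin", n_ctx=32)

# A prompt that tokenizes to far more than 32 tokens.
long_prompt = "Tell me a story. " * 50

try:
    llm(long_prompt, max_tokens=16)
except ValueError as e:
    # With this commit the message includes the actual prompt size,
    # e.g. "Requested tokens (250) exceed context window of 32".
    print(e)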