2 parents 37d5192 + fd9f294 · commit ff9faaa
llama_cpp/llama.py
@@ -814,7 +814,7 @@ def _create_completion(
 llama_cpp.llama_reset_timings(self.ctx)

 if len(prompt_tokens) > self._n_ctx:
-    raise ValueError(f"Requested tokens exceed context window of {self._n_ctx}")
+    raise ValueError(f"Requested tokens ({len(prompt_tokens)}) exceed context window of {self._n_ctx}")

 # Truncate max_tokens if requested tokens would exceed the context window
 max_tokens = (
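
For context, this check in _create_completion rejects prompts that tokenize to more than the model's context window. Below is a minimal sketch of how the improved error message surfaces to a caller, assuming the high-level llama-cpp-python API; the model path and prompt are placeholders, not part of this commit.

from llama_cpp import Llama

# Placeholder model path; any local ggml model file would do here.
llm = Llama(model_path="./models/7B/ggml-model.bin", n_ctx=512)

# Deliberately build a prompt that tokenizes to more than n_ctx tokens.
very_long_prompt = "word " * 1000

try:
    llm(very_long_prompt, max_tokens=16)
except ValueError as err:
    # With this change the message now includes the offending count,
    # e.g. "Requested tokens (1001) exceed context window of 512".
    print(err)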