From 258c51899458d52976343e160002a58992642693 Mon Sep 17 00:00:00 2001
From: Laurent Sorber
Date: Thu, 8 Aug 2024 08:49:23 +0000
Subject: [PATCH] fix: only print 'cache saved' in verbose mode

---
 llama_cpp/llama.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 49608908b..72829f825 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -1523,7 +1523,8 @@ def logit_bias_processor(
                 if self.verbose:
                     print("Llama._create_completion: cache save", file=sys.stderr)
                 self.cache[prompt_tokens + completion_tokens] = self.save_state()
-                print("Llama._create_completion: cache saved", file=sys.stderr)
+                if self.verbose:
+                    print("Llama._create_completion: cache saved", file=sys.stderr)
                 return
 
         if self.cache: