From 7bb0b56ad3a3c58ec518087df475f47affdd656d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 29 May 2024 19:22:41 +0200
Subject: [PATCH 01/14] Support SPM infill

---
 llama_cpp/llama.py | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 6d872e3e6..897c4dd0e 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -113,6 +113,7 @@ def __init__(
         type_k: Optional[int] = None,
         type_v: Optional[int] = None,
         # Misc
+        spm_infill: bool = False,
         verbose: bool = True,
         # Extra Params
         **kwargs,  # type: ignore
@@ -182,6 +183,7 @@ def __init__(
             verbose: Print verbose output to stderr.
             type_k: KV cache data type for K (default: f16)
             type_v: KV cache data type for V (default: f16)
+            spm_infill: Use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this.

         Raises:
             ValueError: If the model path does not exist.
@@ -335,6 +337,8 @@ def __init__(
         self.lora_scale = lora_scale
         self.lora_path = lora_path

+        self.spm_infill = spm_infill
+
         if not os.path.exists(model_path):
             raise ValueError(f"Model path does not exist: {model_path}")

@@ -971,7 +975,7 @@ def _create_completion(
         # detokenization including a space at the beginning of the completion
         completion_tokens: List[int] = [] if len(prompt) > 0 else [self.token_bos()]
         # Add blank space to start of prompt to match OG llama tokenizer
-        prompt_tokens: List[int] = (
+        prefix_tokens: List[int] = (
             (
                 [prefix_token_id]
                 if prefix_token_id >= 0 and suffix is not None
@@ -991,7 +995,8 @@ def _create_completion(
                 if isinstance(prompt, str)
                 else prompt
             )
-            +
+        )
+        suffix_tokens: List[int] = (
             (
                 (
                     [suffix_token_id]
@@ -1005,13 +1010,13 @@ def _create_completion(
                 if suffix_token_id >= 0 and suffix is not None
                 else []
             )
-            +
-            (
-                [middle_token_id]
-                if middle_token_id >= 0 and suffix is not None
-                else []
-            )
         )
+        middle_tokens: List[int] = (
+            [middle_token_id]
+            if middle_token_id >= 0 and suffix is not None
+            else []
+        )
+        prompt_tokens: List[int] = (suffix_tokens + prefix_tokens + middle_tokens) if spm_infill else (prefix_tokens + suffix_tokens + middle_tokens)
         text: bytes = b""
         returned_tokens: int = 0
         stop = (
@@ -1844,6 +1849,7 @@ def __getstate__(self):
             type_k=self.context_params.type_k,
             type_v=self.context_params.type_v,
             # Misc
+            spm_infill=self.spm_infill,
             verbose=self.verbose,
         )

From c5c056ed17732b9e172cb937c22b6068e88cd202 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 29 May 2024 19:30:41 +0200
Subject: [PATCH 02/14] typo--

---
 llama_cpp/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 897c4dd0e..bdc5d0c93 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -1016,7 +1016,7 @@ def _create_completion(
             if middle_token_id >= 0 and suffix is not None
             else []
         )
-        prompt_tokens: List[int] = (suffix_tokens + prefix_tokens + middle_tokens) if spm_infill else (prefix_tokens + suffix_tokens + middle_tokens)
+        prompt_tokens: List[int] = (suffix_tokens + prefix_tokens + middle_tokens) if self.spm_infill else (prefix_tokens + suffix_tokens + middle_tokens)
         text: bytes = b""
         returned_tokens: int = 0
         stop = (
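The reordering introduced by patches 01-02 is easier to see outside the diff context. The standalone sketch below is not part of the patch series; the token IDs are made-up placeholders, not real vocabulary entries. It mirrors the prompt assembly the patches add to `_create_completion`:

```python
# Placeholder FIM marker IDs and token lists, purely for illustration.
prefix_token_id, suffix_token_id, middle_token_id = 1, 2, 3
prefix_tokens = [prefix_token_id, 101, 102]  # prefix marker + tokenized prompt
suffix_tokens = [suffix_token_id, 201]       # suffix marker + tokenized suffix
middle_tokens = [middle_token_id]            # middle marker (generation point)

spm_infill = True  # False gives the classic Prefix/Suffix/Middle layout

# Same expression the patch adds: SPM simply moves the suffix block first.
prompt_tokens = (
    (suffix_tokens + prefix_tokens + middle_tokens)
    if spm_infill
    else (prefix_tokens + suffix_tokens + middle_tokens)
)

print(prompt_tokens)  # SPM: [2, 201, 1, 101, 102, 3]; PSM: [1, 101, 102, 2, 201, 3]
```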
From 73a1e7274afacab8e4af7734ca83d9c375b7aa8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 29 May 2024 19:56:02 +0200
Subject: [PATCH 03/14] one less layer of parenthesis necessary

---
 llama_cpp/llama.py | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index bdc5d0c93..1f3ff8372 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -998,18 +998,16 @@ def _create_completion(
         )
         suffix_tokens: List[int] = (
             (
+                [suffix_token_id]
+                +
                 (
-                    [suffix_token_id]
-                    +
-                    (
-                        self.tokenize(suffix.encode("utf-8"), add_bos=False, special=False)
-                        if suffix
-                        else []
-                    )
+                    self.tokenize(suffix.encode("utf-8"), add_bos=False, special=False)
+                    if suffix
+                    else []
                 )
-                if suffix_token_id >= 0 and suffix is not None
-                else []
             )
+            if suffix_token_id >= 0 and suffix is not None
+            else []
         )
         middle_tokens: List[int] = (
             [middle_token_id]

From 1f9abf885ba8eef197717325e38488ce5aaacd01 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 29 May 2024 20:26:20 +0200
Subject: [PATCH 04/14] new required internals

---
 llama_cpp/_internals.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/llama_cpp/_internals.py b/llama_cpp/_internals.py
index b404601d3..b9606bc50 100644
--- a/llama_cpp/_internals.py
+++ b/llama_cpp/_internals.py
@@ -162,6 +162,14 @@ def token_eot(self) -> int:
         assert self.model is not None
         return llama_cpp.llama_token_eot(self.model)

+    def add_bos_token(self) -> int:
+        assert self.model is not None
+        return llama_cpp.llama_add_bos_token(self.model)
+
+    def add_eos_token(self) -> int:
+        assert self.model is not None
+        return llama_cpp.llama_add_eos_token(self.model)
+
     # Tokenization

     def tokenize(self, text: bytes, add_bos: bool, special: bool):

From c70483d8f403a3853b89c118bcc985e7f51bb987 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 29 May 2024 20:47:18 +0200
Subject: [PATCH 05/14] manually add bos/eos if model requires it

---
 llama_cpp/llama.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 1f3ff8372..22e43318f 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -971,6 +971,8 @@ def _create_completion(
         prefix_token_id: int = self._model.token_prefix()
         middle_token_id: int = self._model.token_middle()
         suffix_token_id: int = self._model.token_suffix()
+        bos_tokens: List[int] = [self.token_bos()] if self._model.add_bos_token() == 1 else []
+        eos_tokens: List[int] = [self.token_eos()] if self._model.add_eos_token() == 1 else []
         # If prompt is empty, initialize completion with BOS token to avoid
         # detokenization including a space at the beginning of the completion
         completion_tokens: List[int] = [] if len(prompt) > 0 else [self.token_bos()]
@@ -984,13 +986,9 @@ def _create_completion(
             +
             (
                 (
-                    self.tokenize(prompt.encode("utf-8"), add_bos=(prefix_token_id < 0 or suffix is None), special=(prefix_token_id < 0 or suffix is None))
+                    self.tokenize(prompt.encode("utf-8"), add_bos=False, special=(prefix_token_id < 0 or suffix is None))
                     if prompt != ""
-                    else (
-                        []
-                        if prefix_token_id >= 0 and suffix is not None
-                        else [self.token_bos()]
-                    )
+                    else []
                 )
                 if isinstance(prompt, str)
                 else prompt
             )
@@ -1014,7 +1012,7 @@ def _create_completion(
             if middle_token_id >= 0 and suffix is not None
             else []
         )
-        prompt_tokens: List[int] = (suffix_tokens + prefix_tokens + middle_tokens) if self.spm_infill else (prefix_tokens + suffix_tokens + middle_tokens)
+        prompt_tokens: List[int] = bos_tokens + ((suffix_tokens + prefix_tokens + middle_tokens) if self.spm_infill else (prefix_tokens + suffix_tokens + middle_tokens)) + eos_tokens
         text: bytes = b""
         returned_tokens: int = 0
         stop = (
From e54e47e63699ba6b98d47da30abd790690214080 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 29 May 2024 21:19:28 +0200
Subject: [PATCH 06/14] add bos even when unknown

This is identical behaviour to llama.cpp. I guess any model that doesn't
use BOS is recent enough to have the add_bos_token metadata.
---
 llama_cpp/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 22e43318f..d0afc6815 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -971,7 +971,7 @@ def _create_completion(
         prefix_token_id: int = self._model.token_prefix()
         middle_token_id: int = self._model.token_middle()
         suffix_token_id: int = self._model.token_suffix()
-        bos_tokens: List[int] = [self.token_bos()] if self._model.add_bos_token() == 1 else []
+        bos_tokens: List[int] = [self.token_bos()] if self._model.add_bos_token() != 0 else []
         eos_tokens: List[int] = [self.token_eos()] if self._model.add_eos_token() == 1 else []
         # If prompt is empty, initialize completion with BOS token to avoid
         # detokenization including a space at the beginning of the completion

From fce73d0919912f3b6e9dcb009f4d621de05d7e98 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 29 May 2024 21:55:43 +0200
Subject: [PATCH 07/14] don't add bos/eos on non-infill pre-tokenized prompt

---
 llama_cpp/llama.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index d0afc6815..eaba53e2f 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -971,8 +971,8 @@ def _create_completion(
         prefix_token_id: int = self._model.token_prefix()
         middle_token_id: int = self._model.token_middle()
         suffix_token_id: int = self._model.token_suffix()
-        bos_tokens: List[int] = [self.token_bos()] if self._model.add_bos_token() != 0 else []
-        eos_tokens: List[int] = [self.token_eos()] if self._model.add_eos_token() == 1 else []
+        bos_tokens: List[int] = [self.token_bos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 else []
+        eos_tokens: List[int] = [self.token_eos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_eos_token() == 1 else []
         # If prompt is empty, initialize completion with BOS token to avoid
         # detokenization including a space at the beginning of the completion
         completion_tokens: List[int] = [] if len(prompt) > 0 else [self.token_bos()]
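Patches 04-07 settle on metadata-driven BOS/EOS handling. The following is a hedged summary of that decision as standalone code, not the literal code path; it assumes `llm` is a loaded `llama_cpp.Llama`, `_model` is its private model handle, and `add_bos_token()`/`add_eos_token()` are the wrappers patch 04 adds to `_internals.py`:

```python
def infill_bos_eos(llm, prompt, suffix):
    # A pre-tokenized prompt without a suffix is passed through untouched.
    pre_tokenized = isinstance(prompt, list) and suffix is None
    # The wrappers surface the GGUF add_bos_token/add_eos_token metadata as 1/0,
    # or a negative value when the model does not say; hence the "!= 0" check,
    # which adds BOS even when unknown (patch 06).
    add_bos = llm._model.add_bos_token()
    add_eos = llm._model.add_eos_token()
    bos_tokens = [llm.token_bos()] if not pre_tokenized and add_bos != 0 else []
    eos_tokens = [llm.token_eos()] if not pre_tokenized and add_eos == 1 else []
    return bos_tokens, eos_tokens
```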
suffix = "☺" + suffix + suffix_space_prefix = 2 + # If prompt is empty, initialize completion with BOS token to avoid # detokenization including a space at the beginning of the completion completion_tokens: List[int] = [] if len(prompt) > 0 else [self.token_bos()] @@ -999,7 +1007,7 @@ def _create_completion( [suffix_token_id] + ( - self.tokenize(suffix.encode("utf-8"), add_bos=False, special=False) + self.tokenize(suffix.encode("utf-8"), add_bos=False, special=False)[suffix_space_prefix:] if suffix else [] ) From 5118dfa9456651aa03210e27b720b30dc19a19d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 30 May 2024 00:40:55 +0200 Subject: [PATCH 09/14] I keep forgetting metadata are strings --- llama_cpp/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index 8182ed88e..c7c631736 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -971,7 +971,7 @@ def _create_completion( prefix_token_id: int = self._model.token_prefix() middle_token_id: int = self._model.token_middle() suffix_token_id: int = self._model.token_suffix() - add_space_prefix: bool = self.metadata.get("tokenizer.ggml.add_space_prefix", True) + add_space_prefix: bool = self.metadata.get("tokenizer.ggml.add_space_prefix", "true") == "true" bos_tokens: List[int] = [self.token_bos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 else [] eos_tokens: List[int] = [self.token_eos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_eos_token() == 1 else [] From fa97f86df79ccb2872af8f8938b6e8a79a1a29c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 30 May 2024 00:59:18 +0200 Subject: [PATCH 10/14] check if bos exists --- llama_cpp/llama.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index c7c631736..055900f34 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -968,11 +968,12 @@ def _create_completion( completion_id: str = f"cmpl-{str(uuid.uuid4())}" created: int = int(time.time()) + bos_token_id: int = self.token_bos() prefix_token_id: int = self._model.token_prefix() middle_token_id: int = self._model.token_middle() suffix_token_id: int = self._model.token_suffix() add_space_prefix: bool = self.metadata.get("tokenizer.ggml.add_space_prefix", "true") == "true" - bos_tokens: List[int] = [self.token_bos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 else [] + bos_tokens: List[int] = [bos_token_id] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 and bos_token_id >= 0 else [] eos_tokens: List[int] = [self.token_eos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_eos_token() == 1 else [] suffix_space_prefix: int = 0 @@ -983,7 +984,7 @@ def _create_completion( # If prompt is empty, initialize completion with BOS token to avoid # detokenization including a space at the beginning of the completion - completion_tokens: List[int] = [] if len(prompt) > 0 else [self.token_bos()] + completion_tokens: List[int] = [] if len(prompt) > 0 else [bos_token_id] # Add blank space to start of prompt to match OG llama tokenizer prefix_tokens: List[int] = ( ( @@ -1171,7 +1172,7 @@ def logit_bias_processor( # not sure how to handle this branch when dealing # with CJK output, so keep it unchanged for token in remaining_tokens: - if token == self.token_bos(): + if 
From fa97f86df79ccb2872af8f8938b6e8a79a1a29c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Thu, 30 May 2024 00:59:18 +0200
Subject: [PATCH 10/14] check if bos exists

---
 llama_cpp/llama.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index c7c631736..055900f34 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -968,11 +968,12 @@ def _create_completion(

         completion_id: str = f"cmpl-{str(uuid.uuid4())}"
         created: int = int(time.time())
+        bos_token_id: int = self.token_bos()
         prefix_token_id: int = self._model.token_prefix()
         middle_token_id: int = self._model.token_middle()
         suffix_token_id: int = self._model.token_suffix()
         add_space_prefix: bool = self.metadata.get("tokenizer.ggml.add_space_prefix", "true") == "true"
-        bos_tokens: List[int] = [self.token_bos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 else []
+        bos_tokens: List[int] = [bos_token_id] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 and bos_token_id >= 0 else []
         eos_tokens: List[int] = [self.token_eos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_eos_token() == 1 else []

         suffix_space_prefix: int = 0
@@ -983,7 +984,7 @@ def _create_completion(

         # If prompt is empty, initialize completion with BOS token to avoid
         # detokenization including a space at the beginning of the completion
-        completion_tokens: List[int] = [] if len(prompt) > 0 else [self.token_bos()]
+        completion_tokens: List[int] = [] if len(prompt) > 0 else [bos_token_id]
         # Add blank space to start of prompt to match OG llama tokenizer
         prefix_tokens: List[int] = (
             (
@@ -1171,7 +1172,7 @@ def logit_bias_processor(
                     # not sure how to handle this branch when dealing
                     # with CJK output, so keep it unchanged
                     for token in remaining_tokens:
-                        if token == self.token_bos():
+                        if token == bos_token_id:
                             continue
                         token_end_position += len(self.detokenize([token], prev_tokens=prompt_tokens + completion_tokens[:returned_tokens]))
                         # Check if stop sequence is in the token
@@ -1298,7 +1299,7 @@ def logit_bias_processor(
                 logprobs_or_none: Optional[CompletionLogprobs] = None
                 if logprobs is not None:
-                    if token == self.token_bos():
+                    if token == bos_token_id:
                         continue
                     token_str = self.detokenize([token]).decode(
                         "utf-8", errors="ignore"
                     )
@@ -1426,7 +1427,7 @@ def logit_bias_processor(
             for idx, (token, token_str, logprobs_token) in enumerate(
                 zip(all_tokens, all_token_strs, all_logprobs)
             ):
-                if token == self.token_bos():
+                if token == bos_token_id:
                     continue
                 text_offsets.append(
                     text_offset

From 7b806699c97e930be58861f43cf33c9cb415fe26 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Tue, 4 Jun 2024 20:54:01 +0200
Subject: [PATCH 11/14] add example

---
 .../high_level_api/high_level_api_infill.py  | 33 +++++++++++++++++++
 1 file changed, 33 insertions(+)
 create mode 100644 examples/high_level_api/high_level_api_infill.py

diff --git a/examples/high_level_api/high_level_api_infill.py b/examples/high_level_api/high_level_api_infill.py
new file mode 100644
index 000000000..992dc6399
--- /dev/null
+++ b/examples/high_level_api/high_level_api_infill.py
@@ -0,0 +1,33 @@
+import argparse
+
+from llama_cpp import Llama
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin")
+parser.add_argument("-p", "--prompt", type=str, default="def add(")
+parser.add_argument("-s", "--suffix", type=str, default="\n    return sum\n\n")
+parser.add_argument("-i", "--spm-infill", action='store_true')
+args = parser.parse_args()
+
+llm = Llama(model_path=args.model, n_gpu_layers=-1, spm_infill=args.spm_infill)
+
+output = llm.create_completion(
+    temperature = 0.0,
+    repeat_penalty = 1.0,
+    prompt = args.prompt,
+    suffix = args.suffix,
+)
+
+# Models sometimes repeat suffix in response, attempt to filter that
+response = output["choices"][0]["text"]
+response_stripped = response.rstrip()
+unwanted_response_suffix = args.suffix.rstrip()
+unwanted_response_length = len(unwanted_response_suffix)
+
+filtered = False
+if unwanted_response_suffix and response_stripped[-unwanted_response_length:] == unwanted_response_suffix:
+    response = response_stripped[:-unwanted_response_length]
+    filtered = True
+
+print(f"Fill-in-Middle completion{' (filtered)' if filtered else ''}:\n\n{args.prompt}\033[32m{response}\033[0m{args.suffix}")
+
From aab7d32c843810ffffed6d3f1ae06a52be36af03 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 5 Jun 2024 10:29:15 +0200
Subject: [PATCH 12/14] add cls/sep instead of bos/eos for WPM vocab

---
 llama_cpp/llama.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 56b2e54d1..bdcc67ebd 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -977,6 +977,8 @@ def _create_completion(
         completion_id: str = f"cmpl-{str(uuid.uuid4())}"
         created: int = int(time.time())
         bos_token_id: int = self.token_bos()
+        cls_token_id: int = self._model.token_cls()
+        sep_token_id: int = self._model.token_sep()
         prefix_token_id: int = self._model.token_prefix()
         middle_token_id: int = self._model.token_middle()
         suffix_token_id: int = self._model.token_suffix()
@@ -984,6 +986,12 @@ def _create_completion(
         bos_tokens: List[int] = [bos_token_id] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 and bos_token_id >= 0 else []
         eos_tokens: List[int] = [self.token_eos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_eos_token() == 1 else []

+        if cls_token_id != -1:
+            bos_tokens = [cls_token_id]
+
+        if sep_token_id != -1:
+            eos_tokens = [sep_token_id]
+
         suffix_space_prefix: int = 0
         # Tokenizer hack to remove leading space
         if add_space_prefix and suffix_token_id >= 0 and suffix:

From 2d7cb7e1cf597de24f2e85d85b45fa127739fba3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 5 Jun 2024 10:48:59 +0200
Subject: [PATCH 13/14] simplify

---
 llama_cpp/llama.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index bdcc67ebd..f15e0ef57 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -983,14 +983,14 @@ def _create_completion(
         middle_token_id: int = self._model.token_middle()
         suffix_token_id: int = self._model.token_suffix()
         add_space_prefix: bool = self.metadata.get("tokenizer.ggml.add_space_prefix", "true") == "true"
-        bos_tokens: List[int] = [bos_token_id] if not (isinstance(prompt, list) and suffix is None) and self._model.add_bos_token() != 0 and bos_token_id >= 0 else []
-        eos_tokens: List[int] = [self.token_eos()] if not (isinstance(prompt, list) and suffix is None) and self._model.add_eos_token() == 1 else []
+        bos_tokens: List[int] = [cls_token_id if cls_token_id != -1 else bos_token_id]
+        eos_tokens: List[int] = [sep_token_id if sep_token_id != -1 else self.token_eos()]

-        if cls_token_id != -1:
-            bos_tokens = [cls_token_id]
+        if (isinstance(prompt, list) and suffix is None) or self._model.add_bos_token() == 0 or bos_tokens[:1] == [-1]:
+            bos_tokens = []

-        if sep_token_id != -1:
-            eos_tokens = [sep_token_id]
+        if (isinstance(prompt, list) and suffix is None) or (self._model.add_eos_token() != 1 and sep_token_id == -1):
+            eos_tokens = []

         suffix_space_prefix: int = 0
         # Tokenizer hack to remove leading space

From 5a262c6d8da5d1b01be2fe69303a91ebe9ff27bf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Wed, 12 Jun 2024 08:48:44 +0200
Subject: [PATCH 14/14] color-code filtered suffix

---
 examples/high_level_api/high_level_api_infill.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/high_level_api/high_level_api_infill.py b/examples/high_level_api/high_level_api_infill.py
index 992dc6399..27af6367e 100644
--- a/examples/high_level_api/high_level_api_infill.py
+++ b/examples/high_level_api/high_level_api_infill.py
@@ -29,5 +29,5 @@
     response = response_stripped[:-unwanted_response_length]
     filtered = True

-print(f"Fill-in-Middle completion{' (filtered)' if filtered else ''}:\n\n{args.prompt}\033[32m{response}\033[0m{args.suffix}")
+print(f"Fill-in-Middle completion{' (filtered)' if filtered else ''}:\n\n{args.prompt}\033[32m{response}\033[{'33' if filtered else '0'}m{args.suffix}\033[0m")
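Taken together, the series can be exercised much like the bundled example script. A minimal sketch follows; the GGUF path is a placeholder, and whether the Suffix/Prefix/Middle ordering actually helps depends on how the model was trained for fill-in-the-middle:

```python
from llama_cpp import Llama

# Placeholder path: any FIM-capable GGUF model will do.
llm = Llama(
    model_path="./models/your-fim-model.Q4_K_M.gguf",
    n_gpu_layers=-1,
    spm_infill=True,  # the new Suffix/Prefix/Middle ordering from this series
)

output = llm.create_completion(
    prompt="def add(",
    suffix="\n    return sum\n\n",
    temperature=0.0,
    repeat_penalty=1.0,
)
print(output["choices"][0]["text"])
```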