llama : fix tokenizer · Pull Request #2315 · ggml-org/llama.cpp


Closed · wants to merge 27 commits

Changes from 1 commit (of 27)

Commits:
ac793a2  Fix for #2023 (goerch, Jul 21, 2023)
8c9d1e7  Fix typo (goerch, Jul 21, 2023)
9f055e3  Add missing include (goerch, Jul 22, 2023)
bf665cc  Replace VLA with std::vector (goerch, Jul 22, 2023)
c8ae817  Add possibly missing typename (goerch, Jul 22, 2023)
94a0ee1  More testing of the tokenizer (goerch, Jul 22, 2023)
0e74a72  Added whitespace escaping and unescaping (goerch, Jul 22, 2023)
e6b1a50  Fix for #2310 (goerch, Jul 23, 2023)
dba8369  One more test case... (goerch, Jul 23, 2023)
b97a505  Fix C linkage for llama_token_to_str (goerch, Jul 24, 2023)
81fae1d  Fixing llama_token_to_str for the different sentence_piece token types (goerch, Jul 24, 2023)
281a4b4  Fixing tests (goerch, Jul 24, 2023)
a0d28b2  Remove comment (goerch, Jul 24, 2023)
39c9a3b  Added test cases (goerch, Jul 24, 2023)
fe7508c  Fix review remarks. (goerch, Jul 24, 2023)
8253a53  Fix test (goerch, Jul 24, 2023)
e68580f  Remove llama.cpp.h (goerch, Jul 25, 2023)
3bdf106  Merge branch 'master' into fix-2023 (goerch, Jul 25, 2023)
b4a5461  Resolve merge conflict with grammar stuff. (goerch, Jul 25, 2023)
de41d5e  Fix static declarations (goerch, Jul 26, 2023)
30a0e4c  Fixing function ordering issue (goerch, Aug 6, 2023)
1b54429  Fix tokenizer regression in convert.py and improve CPP interface for … (goerch, Aug 6, 2023)
19e950f  Adding support for Aquila (GPT2?) tokenizer. (goerch, Aug 6, 2023)
bb6a58d  Simplifying an expression. (goerch, Aug 6, 2023)
5d52192  Remove inactive code. (goerch, Aug 6, 2023)
38fbb74  Merge branch 'master' into fix-2023 (goerch, Aug 7, 2023)
f1f85de  Split BPE and SentencePiece vocabularies (goerch, Aug 8, 2023)

Commit 0e74a7222eaf13d5e2e4f2d55b89deec3166f462: Added whitespace escaping and unescaping

goerch committed Jul 22, 2023:

Now we see some resemblance to the Meta tokenizer, I think. Only problem: how to integrate this into the `llama.cpp` kernel.
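
For context on the convention this commit adopts: SentencePiece marks word boundaries with U+2581 ("▁", UTF-8 bytes 0xE2 0x96 0x81) instead of raw spaces. Here is a minimal standalone sketch of the escaping side, mirroring the helper added to the tests below (illustrative, not the PR code verbatim):

```cpp
#include <cstdio>
#include <string>

// U+2581 LOWER ONE EIGHTH BLOCK, the SentencePiece whitespace marker, in UTF-8.
static const std::string k_ws = "\xe2\x96\x81";

// Collapse each run of whitespace into a single marker and prepend one,
// mimicking SentencePiece's dummy-prefix behavior.
static std::string escape_whitespace(const std::string & text) {
    std::string result = k_ws;
    bool escaping = false;
    for (char c : text) {
        if (c == ' ' || c == '\t' || c == '\n') {
            if (!escaping) {
                result += k_ws;
                escaping = true;
            }
        } else {
            escaping = false;
            result += c;
        }
    }
    return result;
}

int main() {
    printf("%s\n", escape_whitespace(" Hello World").c_str()); // "▁▁Hello▁World"
    return 0;
}
```

The always-prepended marker is why the updated test expectations below gain an explicit token 29871 ("▁") for inputs that start with a space.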

convert.py · 7 changes: 1 addition & 6 deletions

@@ -233,12 +233,7 @@ def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
         for i in range(tokenizer.vocab_size()):
             # TODO: How do we want to support is_unknown, is_control, is_byte and is_unused?
             piece = tokenizer.id_to_piece(i)
-            text: bytes
-            if tokenizer.is_unknown(i) or tokenizer.is_control(i) or tokenizer.is_byte(i):
-                text: bytes = piece.encode("utf-8")
-            else:
-                text = piece.replace("\u2581", " ").encode("utf-8")
-
+            text: bytes = piece.encode("utf-8")
             score: float = tokenizer.get_score(i)
             yield text, score

llama.cpp · 6 changes: 3 additions & 3 deletions

@@ -1832,13 +1832,13 @@ struct llama_tokenizer {
     llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}

     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
-        // split string into utf8 chars
+        // split string into utf8 chars / token?
         int index = 0;
         size_t offs = 0;
         while (offs < text.size()) {
             llama_sp_symbol sym;
-            // size_t len = utf8_len(text[offs]);
-            size_t len = llama_trie_find(vocab_.trie, text, offs);
+            size_t len = utf8_len(text[offs]);
+            // size_t len = llama_trie_find(vocab_.trie, text, offs);
             if (len == 0) {
                 len = utf8_len(text[offs]);
             }
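
This hunk reverts the branch's earlier trie-based splitting back to plain UTF-8 character splitting. For reference, a sketch of `utf8_len` as commonly written in llama.cpp around this time (assuming the table-based formulation; check the tree for the exact code):

```cpp
#include <cstddef>
#include <cstdint>

// Length in bytes of a UTF-8 sequence, derived from the top four bits of its
// lead byte: 0x0-0xB -> 1 (ASCII or continuation), 0xC-0xD -> 2, 0xE -> 3, 0xF -> 4.
static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
```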

tests/test-tokenizer-0.cpp · 59 changes: 46 additions & 13 deletions

@@ -5,26 +5,59 @@
 #include <map>
 #include <vector>

-std::string detokenize(llama_context * ctx, const llama_token * tokens, int count) {
+static std::string escape_whitespace(const std::string& text) {
     std::string result;
-    for (int i = 0; i < count; ++i) {
-        result += llama_token_to_str(ctx, tokens[i]);
-        if (i < count - 1) {
-            result += "_";
+    bool escaping = false;
+    result += char(0xe2);
+    result += char(0x96);
+    result += char(0x81);
+    for (size_t offs = 0; offs < text.length(); ++offs) {
+        if (text[offs] == ' ' || text[offs] == '\t' || text[offs] == '\n') {
+            if (!escaping) {
+                result += char(0xe2);
+                result += char(0x96);
+                result += char(0x81);
+                escaping = true;
+            }
+        }
+        else {
+            escaping = false;
+            result += text[offs];
         }
     }
     return result;
 }

+static std::string unescape_whitespace(llama_context* ctx, llama_token token) {
+    const char* word = llama_token_to_str(ctx, token);
+    if (strlen(word) >= 3 &&
+        word[0] == char(0xe2) &&
+        word[1] == char(0x96) &&
+        word[2] == char(0x81)) {
+        return std::string(" ") + (word + 3);
+    }
+    return word;
+}
+
+static std::string unescape_whitespace(llama_context* ctx, const llama_token* tokens, int count) {
+    std::string result;
+    for (int i = 0; i < count; ++i) {
+        result += unescape_whitespace(ctx, tokens[i]);
+    }
+    return result;
+}
+
 static const std::map<std::string, std::vector<llama_token>> & k_tests()
 {
     static std::map<std::string, std::vector<llama_token>> _k_tests = {
-        { "Hello World",        { 1, 10994, 2787, }, },
-        { " Hello World",       { 1, 15043, 2787, }, },
-        { " Hello World!",      { 1, 15043, 2787, 29991, }, },
-        { " this is 🦙.cpp",    { 1, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
-        { "w048 7tuijk dsdfhu", { 1, 29893, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
-        { "нещо на Български",  { 1, 821, 4851, 665, 1386, 29713, 1305, }, },
+        { "Hello world",        { 1, 15043, 3186, }, },
+        { " Hello world",       { 1, 29871, 15043, 3186, }, },
+        { "Hello World",        { 1, 15043, 2787, }, },
+        { " Hello World",       { 1, 29871, 15043, 2787, }, },
+        { " Hello World!",      { 1, 29871, 15043, 2787, 29991, }, },
+        { " this is 🦙.cpp",    { 1, 29871, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
+        { "w048 7tuijk dsdfhu", { 1, 281, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
+        { "нещо на Български",  { 1, 1538, 4851, 665, 1386, 29713, 1305, }, },
     };
     return _k_tests;
 };
@@ -77,9 +110,9 @@ int main(int argc, char **argv) {

     for (const auto & test_kv : k_tests()) {
         std::vector<llama_token> res(test_kv.first.size());
-        const int n = llama_tokenize(ctx, test_kv.first.c_str(), res.data(), int(res.size()), true);
+        const int n = llama_tokenize(ctx, escape_whitespace(test_kv.first.c_str()).c_str(), res.data(), int(res.size()), true);
         fprintf(stderr, "%s : '%s' tokenized to '%s'\n",
-            __func__, test_kv.first.c_str(), detokenize(ctx, res.data(), n).c_str());
+            __func__, test_kv.first.c_str(), unescape_whitespace(ctx, res.data(), n).c_str());
         res.resize(n);

         bool correct = res.size() == test_kv.second.size();
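
To make the new flow concrete, a hypothetical fragment in the spirit of the test loop above (it assumes the loaded `llama_context * ctx` and the helpers from this diff; expected ids come from the updated `k_tests()` table):

```cpp
// Illustrative only, not part of the PR.
const std::string input   = " Hello World!";
const std::string escaped = escape_whitespace(input); // "▁▁Hello▁World!" in UTF-8

std::vector<llama_token> toks(escaped.size());
const int n = llama_tokenize(ctx, escaped.c_str(), toks.data(), int(toks.size()), true);
// per k_tests() this should yield { 1, 29871, 15043, 2787, 29991 }

// unescaping maps each marker back to a space when printing the result
fprintf(stderr, "round trip: '%s'\n", unescape_whitespace(ctx, toks.data(), n).c_str());
```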

tests/test-tokenizer-1.cpp · 57 changes: 44 additions & 13 deletions

@@ -8,17 +8,48 @@
 #include <map>
 #include <vector>

-std::string detokenize(llama_context * ctx, const llama_token * tokens, int count) {
+static std::string escape_whitespace(const std::string& text) {
     std::string result;
-    for (int i = 0; i < count; ++i) {
-        result += llama_token_to_str(ctx, tokens[i]);
-        if (i < count - 1) {
-            result += "_";
+    bool escaping = false;
+    result += char(0xe2);
+    result += char(0x96);
+    result += char(0x81);
+    for (size_t offs = 0; offs < text.length(); ++offs) {
+        if (text[offs] == ' ' || text[offs] == '\t' || text[offs] == '\n') {
+            if (!escaping) {
+                result += char(0xe2);
+                result += char(0x96);
+                result += char(0x81);
+                escaping = true;
+            }
+        }
+        else {
+            escaping = false;
+            result += text[offs];
         }
     }
     return result;
 }

+static std::string unescape_whitespace(llama_context* ctx, llama_token token) {
+    const char* word = llama_token_to_str(ctx, token);
+    if (strlen(word) >= 3 &&
+        word[0] == char(0xe2) &&
+        word[1] == char(0x96) &&
+        word[2] == char(0x81)) {
+        return std::string(" ") + (word + 3);
+    }
+    return word;
+}
+
+static std::string unescape_whitespace(llama_context* ctx, const llama_token* tokens, int count) {
+    std::string result;
+    for (int i = 0; i < count; ++i) {
+        result += unescape_whitespace(ctx, tokens[i]);
+    }
+    return result;
+}
+
 int main(int argc, char **argv) {
     if (argc < 2) {
         fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
@@ -66,22 +97,22 @@ int main(int argc, char **argv) {
     }

     for (int i = 0; i < n_vocab; ++i) {
-        const char * forward = llama_token_to_str(ctx, i);
-        std::vector<llama_token> tokens(strlen(forward));
-        auto n = llama_tokenize(ctx, forward, tokens.data(), strlen(forward), false);
+        std::string forward = llama_token_to_str(ctx, i);
+        std::vector<llama_token> tokens(forward.length());
+        int n = llama_tokenize(ctx, forward.c_str(), tokens.data(), forward.length(), false);
         if (n == 1) {
             if (i != tokens[0]) {
-                const char* backward = llama_token_to_str(ctx, tokens[0]);
+                std::string backward = unescape_whitespace(ctx, tokens[0]);
                 fprintf(stderr, "%s : error: token %d is string %s but tokenize() returns token %d %s\n",
-                    __func__, i, forward, tokens[0], backward);
+                    __func__, i, unescape_whitespace(ctx, i).c_str(), tokens[0], backward.c_str());
             }
         } else {
             if (i <= 258) {
                 fprintf(stderr, "%s : info: token %d is string %s and tokenize() returns tokens %s\n",
-                    __func__, i, forward, detokenize(ctx, tokens.data(), n).c_str());
+                    __func__, i, unescape_whitespace(ctx, i).c_str(), unescape_whitespace(ctx, tokens.data(), n).c_str());
             } else {
                 fprintf(stderr, "%s : error: token %d is string %s but tokenize() returns tokens %s\n",
-                    __func__, i, forward, detokenize(ctx, tokens.data(), n).c_str());
+                    __func__, i, unescape_whitespace(ctx, i).c_str(), unescape_whitespace(ctx, tokens.data(), n).c_str());
             }
         }
     }
@@ -91,7 +122,7 @@ int main(int argc, char **argv) {
         std::wstring wstr(1, ch);
         std::string str = converter.to_bytes(wstr);
         std::vector<llama_token> tokens(strlen(str.c_str()));
-        auto n = llama_tokenize(ctx, str.c_str(), tokens.data(), str.length(), false);
+        auto n = llama_tokenize(ctx, escape_whitespace(str).c_str(), tokens.data(), str.length(), false);
         if (n == 1) {
             fprintf(stderr, "%s : info: %s tokenized to %d \n",
                 __func__, str.c_str(), tokens[0]);
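
The `i <= 258` carve-out reflects the LLaMA SentencePiece vocabulary layout: ids 0..2 are `<unk>`/`<s>`/`</s>`, and ids 3..258 are the 256 byte-fallback pieces `<0x00>`..`<0xFF>`, which `llama_tokenize` never returns as a single matching id, so the test reports them as info rather than error. A hedged sketch of that assumed id-to-byte mapping (the helper is hypothetical, not in the PR):

```cpp
#include <cassert>
#include <cstdint>

typedef int llama_token; // as in llama.h at the time

// Map a byte-fallback token id to its byte value, assuming the standard
// LLaMA SentencePiece layout where ids 3..258 are <0x00>..<0xFF>.
static uint8_t byte_token_to_byte(llama_token id) {
    assert(id >= 3 && id <= 258);
    return static_cast<uint8_t>(id - 3);
}
```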