We could use std::unordered_map over std::map by Fabio3rs · Pull Request #305 · ggml-org/llama.cpp · GitHub
We could use std::unordered_map over std::map #305


Merged
merged 7 commits into from Mar 21, 2023
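Background for the diffs below: std::map is a balanced tree, so each lookup costs O(log n) and iteration visits keys in sorted order; std::unordered_map is a hash table with average O(1) lookups and no defined iteration order. None of the tables touched by this PR rely on ordered iteration, which is what makes the swap safe. A minimal sketch (not from this PR) contrasting the two containers:

#include <cstdio>
#include <map>
#include <string>
#include <unordered_map>

int main() {
    // std::map: balanced tree, keys iterate in sorted order, O(log n) find.
    std::map<std::string, int> tree = {{"b", 2}, {"a", 1}};

    // std::unordered_map: hash table, no defined order, average O(1) find.
    std::unordered_map<std::string, int> hash = {{"b", 2}, {"a", 1}};

    // Point lookups are written identically; only the cost model and the
    // iteration order differ, so a drop-in swap works for lookup-only tables.
    std::printf("%d %d\n", tree.at("a"), hash.at("a"));
    return 0;
}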
18 changes: 10 additions & 8 deletions main.cpp
@@ -9,7 +9,6 @@
#include <cstring>
#include <fstream>
#include <iostream>
-#include <map>
#include <string>
#include <vector>

@@ -39,7 +38,7 @@ extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHand
static const int EOS_TOKEN_ID = 2;

// determine number of model parts based on the dimension
-static const std::map<int, int> LLAMA_N_PARTS = {
+static const std::unordered_map<int, int> LLAMA_N_PARTS = {
{ 4096, 1 },
{ 5120, 2 },
{ 6656, 4 },
@@ -93,7 +92,7 @@ struct llama_model {

//
struct ggml_context * ctx;
-std::map<std::string, struct ggml_tensor *> tensors;
+std::unordered_map<std::string, struct ggml_tensor *> tensors;
};

// load the model's weights from a file
@@ -178,6 +177,7 @@ bool llama_model_load(const std::string & fname, llama_model & model, llama_voca
// load vocab
{
std::string word;
+vocab.id_to_token.resize(model.hparams.n_vocab);
std::vector<char> tmp(64);

for (int i = 0; i < model.hparams.n_vocab; i++) {
@@ -197,8 +197,10 @@
fin.read((char *) &score, sizeof(score));

vocab.token_to_id[word] = i;
-vocab.id_to_token[i] = word;
-vocab.score[i] = score;
+
+auto &tok_score = vocab.id_to_token[i];
+tok_score.tok = word;
+tok_score.score = score;
}
}

@@ -994,7 +996,7 @@ int main(int argc, char ** argv) {
fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
for (int i = 0; i < (int) embd_inp.size(); i++) {
fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).tok.c_str());
}
fprintf(stderr, "\n");
if (params.interactive) {
@@ -1120,7 +1122,7 @@
// display text
if (!input_noecho) {
for (auto id : embd) {
printf("%s", vocab.id_to_token[id].c_str());
printf("%s", vocab.id_to_token[id].tok.c_str());
}
fflush(stdout);
}
@@ -1135,7 +1137,7 @@
// check for reverse prompt
std::string last_output;
for (auto id : last_n_tokens) {
-last_output += vocab.id_to_token[id];
+last_output += vocab.id_to_token[id].tok;
}

// Check if each of the reverse prompts appears at the end of the output.
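The recurring pattern in main.cpp: vocab.id_to_token was a std::map<id, token> with a parallel std::map<id, float> for scores, and both are replaced by a single std::vector<token_score> indexed directly by token id (ids are dense integers in [0, n_vocab), so the loaders can resize once up front). A self-contained sketch of the new lookup path, where token_score and its fields come from the diff and the surrounding main() is illustrative scaffolding:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// The struct this PR introduces in utils.h.
struct token_score {
    std::string tok;
    float score;
};

int main() {
    // The loaders now resize once up front, then fill slots by id.
    std::vector<token_score> id_to_token;
    id_to_token.resize(3);
    id_to_token[2] = {"hello", -1.5f};

    // Old code walked a tree via std::map::at(id); the new code is a
    // direct vector index plus a field access.
    int32_t id = 2;
    std::printf("%6d -> '%s'\n", id, id_to_token[id].tok.c_str());
    return 0;
}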
8 changes: 5 additions & 3 deletions quantize.cpp
@@ -8,7 +8,6 @@
#include <cstdio>
#include <cstring>
#include <fstream>
-#include <map>
#include <string>
#include <vector>
#include <regex>
@@ -130,6 +129,7 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
}

std::string word;
+vocab.id_to_token.resize(n_vocab);
for (int i = 0; i < n_vocab; i++) {
uint32_t len;
finp.read ((char *) &len, sizeof(len));
@@ -144,8 +144,10 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
fout.write((char *) &score, sizeof(score));

vocab.token_to_id[word] = i;
-vocab.id_to_token[i] = word;
-vocab.score[i] = score;
+
+auto &tok_score = vocab.id_to_token[i];
+tok_score.tok = word;
+tok_score.score = score;
}
}

20 changes: 12 additions & 8 deletions utils.cpp
@@ -155,8 +155,8 @@ void replace(std::string & str, const std::string & needle, const std::string &
}
}

-std::map<std::string, int32_t> json_parse(const std::string & fname) {
-std::map<std::string, int32_t> result;
+std::unordered_map<std::string, int32_t> json_parse(const std::string & fname) {
+std::unordered_map<std::string, int32_t> result;

// read file into string
std::string json;
@@ -360,16 +360,16 @@ struct llama_tokenizer {
return;
}

-auto score = vocab_.score.find((*token).second);
-
-if (score == vocab_.score.end()) {
+if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
return;
}

+const auto &tok_score = vocab_.id_to_token[(*token).second];
+
llama_sp_bigram bigram;
bigram.left = left;
bigram.right = right;
-bigram.score = (*score).second;
+bigram.score = tok_score.score;
bigram.size = text.size();
work_queue_.push(bigram);
}
@@ -393,6 +393,8 @@ bool llama_vocab_load(const std::string & fname, llama_vocab & vocab) {
std::string word;
std::vector<char> tmp(64);

+vocab.id_to_token.resize(n_vocab);
+
for (int i = 0; i < n_vocab; i++) {
uint32_t len;
fin.read((char *) &len, sizeof(len));
@@ -410,8 +412,10 @@
fin.read((char *) &score, sizeof(score));

vocab.token_to_id[word] = i;
-vocab.id_to_token[i] = word;
-vocab.score[i] = score;
+
+auto &tok_score = vocab.id_to_token[i];
+tok_score.tok = word;
+tok_score.score = score;
}

return true;
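In try_add_bigram the separate score map is gone, so the old find()/end() test against vocab_.score becomes a bounds check on the vector before indexing it. A condensed, self-contained sketch of that control flow, where the vocab layout and field names follow the diff and lookup_score is a hypothetical wrapper added for illustration:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct token_score {
    std::string tok;
    float score;
};

struct vocab_t {
    std::unordered_map<std::string, int32_t> token_to_id;
    std::vector<token_score> id_to_token;
};

// Hypothetical wrapper showing the new lookup path: one hash lookup for
// the id, then a range check plus direct vector indexing for the score
// (no second map lookup, no find()/end() pair).
static bool lookup_score(const vocab_t &vocab, const std::string &text, float &out) {
    auto token = vocab.token_to_id.find(text);
    if (token == vocab.token_to_id.end()) {
        return false;
    }
    if (static_cast<size_t>(token->second) >= vocab.id_to_token.size()) {
        return false;
    }
    out = vocab.id_to_token[token->second].score;
    return true;
}

int main() {
    vocab_t vocab;
    vocab.id_to_token.resize(1);
    vocab.token_to_id["he"] = 0;
    vocab.id_to_token[0] = {"he", -0.5f};

    float score = 0.0f;
    if (lookup_score(vocab, "he", score)) {
        std::printf("score = %f\n", score);
    }
    return 0;
}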
14 changes: 9 additions & 5 deletions utils.h
@@ -3,7 +3,7 @@
#pragma once

#include <string>
-#include <map>
+#include <unordered_map>
#include <vector>
#include <random>
#include <thread>
@@ -65,15 +65,19 @@ struct llama_vocab {
using id = int32_t;
using token = std::string;

-std::map<token, id> token_to_id;
-std::map<id, token> id_to_token;
-std::map<id, float> score;
+struct token_score {
+token tok;
+float score;
+};
+
+std::unordered_map<token, id> token_to_id;
+std::vector<token_score> id_to_token;
};

void replace(std::string & str, const std::string & needle, const std::string & replacement);

// poor-man's JSON parsing
-std::map<std::string, int32_t> json_parse(const std::string & fname);
+std::unordered_map<std::string, int32_t> json_parse(const std::string & fname);

// TODO: temporary until #77 is merged, need this now for some tokenizer tests
bool llama_vocab_load(const std::string & fname, llama_vocab & vocab);
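This header change is the core of the PR: token_to_id keeps string keys, so it becomes a std::unordered_map, while id_to_token and score merge into one dense std::vector<token_score>. That removes a whole map, turns two tree lookups per token into one vector index, and improves cache locality. A small usage sketch of the new layout, assuming ids run contiguously from 0 as the loaders guarantee via resize():

#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

// Mirrors the llama_vocab layout after this PR.
struct llama_vocab {
    using id    = int32_t;
    using token = std::string;

    struct token_score {
        token tok;
        float score;
    };

    std::unordered_map<token, id> token_to_id;
    std::vector<token_score> id_to_token;   // dense: index == token id
};

int main() {
    llama_vocab vocab;
    vocab.id_to_token.resize(2);            // ids run contiguously from 0

    // Insert one token the way the loaders now do.
    vocab.token_to_id["hi"] = 1;
    vocab.id_to_token[1] = {"hi", -2.0f};

    // Round trip: string -> id via the hash map, id -> string via the vector.
    llama_vocab::id id = vocab.token_to_id.at("hi");
    std::printf("%d -> '%s' (score %.2f)\n", id,
                vocab.id_to_token[id].tok.c_str(),
                vocab.id_to_token[id].score);
    return 0;
}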