Skip to content

Commit

Permalink
lookup: fibonacci hashing, fix crashes (#8548)
Browse files Browse the repository at this point in the history
  • Loading branch information
JohannesGaessler authored Jul 17, 2024
1 parent b328344 commit e02b597
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 6 deletions.
13 changes: 10 additions & 3 deletions common/ngram-cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,11 +37,18 @@ struct llama_ngram {
}
};

struct llama_token_hash_function {
    // Hash a single token id via Fibonacci hashing: multiplying by
    // 2^64 / golden_ratio spreads consecutive ids evenly over the
    // 64-bit hash space, which plain std::hash<int> does not.
    // Reference: https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
    size_t operator()(const llama_token token) const {
        constexpr unsigned long long fib_multiplier = 11400714819323198485llu;
        return static_cast<size_t>(token) * fib_multiplier;
    }
};

struct llama_ngram_hash_function {
// Hash an n-gram by XOR-combining the Fibonacci hash of each token.
// Seeding with tokens[0] (instead of 0) and starting the loop at 1
// avoids one redundant XOR and guarantees the first token always
// contributes to the hash.
// NOTE(review): XOR-combining is order-insensitive for equal tokens;
// acceptable here since token positions use distinct values in practice.
size_t operator()(const llama_ngram & ngram) const {
    size_t hash = llama_token_hash_function{}(ngram.tokens[0]);
    for (int i = 1; i < LLAMA_NGRAM_MAX; ++i) {
        hash ^= llama_token_hash_function{}(ngram.tokens[i]);
    }
    return hash;
}
Expand Down
3 changes: 1 addition & 2 deletions examples/lookup/lookup-stats.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ int main(int argc, char ** argv){

// load the model
std::tie(model, ctx) = llama_init_from_gpt_params(params);
GGML_ASSERT(llama_n_vocab(model) < (1 << 16));

// tokenize the prompt
std::vector<llama_token> inp;
Expand Down Expand Up @@ -65,7 +64,7 @@ int main(int argc, char ** argv){
}

const int n_input = inp.size();
const int n_ctx = params.n_ctx;
const int n_ctx = llama_n_ctx(ctx);

int n_drafted = 0;
int n_accept = 0;
Expand Down
1 change: 0 additions & 1 deletion examples/lookup/lookup.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ int main(int argc, char ** argv){

// load the model
std::tie(model, ctx) = llama_init_from_gpt_params(params);
GGML_ASSERT(llama_n_vocab(model) < (1 << 16));

// tokenize the prompt
std::vector<llama_token> inp;
Expand Down

0 comments on commit e02b597

Please sign in to comment.