Optimized DeepSeek V2/V3 implementation (MLA) by fairydreaming · Pull Request #11446 · ggml-org/llama.cpp · GitHub
Optimized DeepSeek V2/V3 implementation (MLA) #11446


Closed · wants to merge 10 commits
Changes from 1 commit
llama : add a second copy of c^KV cache in DeepSeek2 MLA to avoid transposing the cache during inference
sszymczy committed Jan 26, 2025
commit 202f323e66809bb1df192245caddc49471660466
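
This commit trades a little extra memory and write work for cheaper reads: each layer's cache now stores both c^KV and its transpose, so the attention graph no longer has to materialize ggml_cont(ggml_transpose(kv_cache)) on every forward pass. Below is a minimal, self-contained C++ sketch of that idea; mla_kv_cache, append, and the plain float buffers are illustrative stand-ins, not the actual ggml tensors touched by this diff.

```cpp
// Sketch (hypothetical names, not the actual ggml/llama.cpp code): keep a second,
// transposed copy of the compressed c^KV cache so reads that need the transposed
// layout never have to transpose the cache at inference time.
#include <cstdio>
#include <vector>

struct mla_kv_cache {
    int rank;       // kv_lora_rank: length of each token's compressed c^KV vector
    int capacity;   // kv_size: maximum number of cached tokens
    int n_tokens = 0;
    std::vector<float> kv;   // layout [token][dim]: one contiguous row per token
    std::vector<float> kvt;  // layout [dim][token]: one contiguous row per dimension

    mla_kv_cache(int rank, int capacity)
        : rank(rank), capacity(capacity),
          kv((size_t) rank * capacity), kvt((size_t) rank * capacity) {}

    // Write each new token's c^KV vector into both layouts (a little extra work per
    // token), so later matmuls can read whichever layout they need directly.
    void append(const float * c_kv) {
        for (int d = 0; d < rank; ++d) {
            kv [(size_t) n_tokens * rank + d]     = c_kv[d]; // contiguous write
            kvt[(size_t) d * capacity + n_tokens] = c_kv[d]; // strided (scatter) write
        }
        ++n_tokens;
    }
};

int main() {
    mla_kv_cache cache(/*rank=*/4, /*capacity=*/8);
    const float t0[] = {1, 2, 3, 4};
    const float t1[] = {5, 6, 7, 8};
    cache.append(t0);
    cache.append(t1);
    // Reading one latent dimension across all cached tokens is now a contiguous run
    // of kvt, with no per-step transpose of the whole cache.
    std::printf("dim 0 over tokens: %.0f %.0f\n", cache.kvt[0], cache.kvt[1]); // 1 5
    return 0;
}
```

The cost, visible in the diff below, is one extra kv_lora_rank*kv_size tensor per layer (hence the bump from 4u to 5u tensor overheads in llama_kv_cache_init) plus a strided copy of each new token's c^KV vector into the transposed buffer.
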
6 changes: 5 additions & 1 deletion src/llama-kv-cache.cpp
@@ -53,7 +53,7 @@ bool llama_kv_cache_init(
     auto it = ctx_map.find(buft);
     if (it == ctx_map.end()) {
         struct ggml_init_params params = {
-            /*.mem_size =*/ size_t(4u*n_layer*ggml_tensor_overhead()),
+            /*.mem_size =*/ size_t(5u*n_layer*ggml_tensor_overhead()),
             /*.mem_buffer =*/ NULL,
             /*.no_alloc =*/ true,
         };
@@ -74,6 +74,7 @@ bool llama_kv_cache_init(
     // DeepSeek MLA
     cache.kr_l.reserve(n_layer);
     cache.kv_l.reserve(n_layer);
+    cache.kvt_l.reserve(n_layer);
 
     for (int i = 0; i < n_layer; i++) {
         const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
@@ -108,10 +109,13 @@ bool llama_kv_cache_init(
         LLAMA_LOG_DEBUG("%s: layer %d: n_embd_head_qk_rope = %d, kv_lora_rank = %d\n", __func__, i, n_embd_head_qk_rope, kv_lora_rank);
         ggml_tensor * kr = ggml_new_tensor_1d(ctx, cache.type_kr, n_embd_head_qk_rope*kv_size);
         ggml_tensor * kv = ggml_new_tensor_1d(ctx, cache.type_kv, kv_lora_rank*kv_size);
+        ggml_tensor * kvt = ggml_new_tensor_1d(ctx, cache.type_kv, kv_lora_rank*kv_size);
         ggml_format_name(kr, "cache_kr_l%d", i);
         ggml_format_name(kv, "cache_kv_l%d", i);
+        ggml_format_name(kvt, "cache_kvt_l%d", i);
         cache.kr_l.push_back(kr);
         cache.kv_l.push_back(kv);
+        cache.kvt_l.push_back(kvt);
     }
 
     // allocate tensors and initialize the buffers to avoid NaNs in the padding
1 change: 1 addition & 0 deletions src/llama-kv-cache.h
@@ -60,6 +60,7 @@ struct llama_kv_cache {
     // DeepSeek MLA
     std::vector<struct ggml_tensor *> kr_l; // per layer
     std::vector<struct ggml_tensor *> kv_l;
+    std::vector<struct ggml_tensor *> kvt_l;
 
     std::vector<ggml_context_ptr> ctxs;
     std::vector<ggml_backend_buffer_ptr> bufs;
16 changes: 13 additions & 3 deletions src/llama.cpp
@@ -6476,13 +6476,26 @@ struct llm_build_context {
             // note: storing c^KV in the KV cache
             ggml_build_forward_expand(gf, ggml_cpy(ctx0, kv_compressed, kv_cache_view));
 
+            struct ggml_tensor * kv_cache_trans_view = ggml_view_2d(ctx0, kv_self.kvt_l[il], n_tokens, kv_lora_rank, ggml_row_size(kv_self.kv_l[il]->type, kv_self.size), ggml_row_size(kv_self.kv_l[il]->type, kv_head));
+            cb(kv_cache_trans_view, "kv_cache_trans_view", il);
+
+            // note: storing transposed c^KV in the transposed KV cache
+            ggml_build_forward_expand(gf, ggml_cpy(ctx0, ggml_transpose(ctx0, kv_compressed), kv_cache_trans_view));
+
             struct ggml_tensor * kv_cache =
                     ggml_view_2d(ctx0, kv_self.kv_l[il],
                             kv_lora_rank, n_kv,
                             ggml_row_size(kv_self.kv_l[il]->type, kv_lora_rank),
                             0);
             cb(kv_cache, "kv_cache", il);
 
+            struct ggml_tensor * kv_cache_trans =
+                    ggml_view_2d(ctx0, kv_self.kvt_l[il],
+                            n_kv, kv_lora_rank,
+                            ggml_row_size(kv_self.kv_l[il]->type, kv_self.size),
+                            0);
+            cb(kv_cache_trans, "kv_cache_trans", il);
+
             q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
             q_pe = ggml_rope_ext(
                     ctx0, q_pe, inp_pos, nullptr,
@@ -6552,9 +6565,6 @@ struct llm_build_context {
             struct ggml_tensor * kq_perm = ggml_permute(ctx0, kq, 0, 2, 3, 1);
             cb(kq_perm, "kq_soft_max_ext_perm", il);
 
-            struct ggml_tensor * kv_cache_trans = ggml_cont(ctx0, ggml_transpose(ctx0, kv_cache));
-            cb(kv_cache_trans, "kv_cache_trans", il);
-
             struct ggml_tensor * kqv_compressed = ggml_mul_mat(ctx0, kv_cache_trans, kq_perm);
             cb(kqv_compressed, "kqv_compressed", il);
 