fix: crash when calling `llama_state_get_size` on a context without a KV cache · robbiemu/llama.cpp@017f10b · GitHub
[go: up one dir, main page]

Skip to content

Commit 017f10b

Browse files
authored
fix: crash when calling llama_state_get_size on a context without a KV cache (ggml-org#13542)
1 parent 4696d56 commit 017f10b

File tree

1 file changed

+5
-3
lines changed

1 file changed

+5
-3
lines changed

src/llama-context.cpp

Lines changed: 5 additions & 3 deletions

@@ -1704,10 +1704,12 @@ size_t llama_context::state_write_data(llama_io_write_i & io) {
         }
     }

-    LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
     llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-
-    kv_self->state_write(io);
+
+    if (kv_self != nullptr) {
+        LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
+        kv_self->state_write(io);
+    }

     return io.n_bytes();
 }

Comments (0)