fix compile · pqnet/llama.cpp@624a683 · GitHub

Commit 624a683

fix compile
1 parent 116b9a1 commit 624a683

File tree: 1 file changed (+1, -0)


include/llama-cpp.h

Lines changed: 1 addition & 0 deletions
@@ -34,6 +34,7 @@ typedef std::unique_ptr<llama_sampler, llama_sampler_deleter> llama_sampler_ptr;
 typedef std::unique_ptr<llama_adapter_lora, llama_adapter_lora_deleter> llama_adapter_lora_ptr;
 
 struct llama_batch_ext_ptr : std::unique_ptr<llama_batch_ext, llama_batch_ext_deleter> {
+    llama_batch_ext_ptr() : std::unique_ptr<llama_batch_ext, llama_batch_ext_deleter>() {}
     llama_batch_ext_ptr(llama_batch_ext * batch) : std::unique_ptr<llama_batch_ext, llama_batch_ext_deleter>(batch) {}
 
     // convenience function to create a batch from text tokens, without worrying about manually freeing it
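
For context, the added line restores default-constructibility: the user-declared converting constructor taking a llama_batch_ext * suppresses the implicitly generated default constructor, so code that declares an empty llama_batch_ext_ptr and assigns it later stops compiling. Below is a minimal, self-contained sketch of the same pattern; Widget, WidgetDeleter, and WidgetPtr are hypothetical stand-ins (not part of llama.cpp) mirroring llama_batch_ext, llama_batch_ext_deleter, and llama_batch_ext_ptr.

// Minimal sketch, not llama.cpp code: hypothetical Widget / WidgetDeleter / WidgetPtr names.
#include <cstdio>
#include <memory>

struct Widget { int value = 42; };

struct WidgetDeleter {
    void operator()(Widget * w) const { delete w; }
};

struct WidgetPtr : std::unique_ptr<Widget, WidgetDeleter> {
    // The converting constructor below suppresses the implicit default
    // constructor, so this explicit one is needed for `WidgetPtr p;` to compile.
    WidgetPtr() : std::unique_ptr<Widget, WidgetDeleter>() {}
    WidgetPtr(Widget * w) : std::unique_ptr<Widget, WidgetDeleter>(w) {}
};

int main() {
    WidgetPtr p;                  // default-constructed empty pointer; does not compile without the added constructor
    p = WidgetPtr(new Widget());  // move-assign later; the deleter frees the object automatically
    std::printf("%d\n", p->value);
    return 0;
}

Deleting the WidgetPtr() line reproduces the kind of compile error this commit fixes: the declaration `WidgetPtr p;` then has no viable default constructor.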
