8000
We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0a5c551 · commit 0067c1a (full SHA: 0067c1a)
llama_cpp/server/__main__.py
@@ -30,7 +30,7 @@ class Settings(BaseSettings):
30
n_batch: int = 8
31
n_threads: int = ((os.cpu_count() or 2) // 2) or 1
32
f16_kv: bool = True
33
- use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out...
+ use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out...
34
embedding: bool = True
35
last_n_tokens_size: int = 64
36
0 commit comments