8000
We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 9b75268 · commit da539cc (Copy full SHA for da539cc)
llama_cpp/server/__main__.py
@@ -28,7 +28,7 @@ class Settings(BaseSettings):
28
model: str
29
n_ctx: int = 2048
30
n_batch: int = 8
31
- n_threads: int = int(os.cpu_count() / 2) or 1
+ n_threads: int = ((os.cpu_count() or 2) // 2) or 1
32
f16_kv: bool = True
33
use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out...
34
embedding: bool = True
0 commit comments