8000
We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ff88fcb · commit 1e42468 — Copy full SHA for 1e42468
llama_cpp/llama.py
@@ -222,7 +222,11 @@ def __init__(
222
) # 0x7FFFFFFF is INT32 max, will be auto set to all layers
223
self.model_params.split_mode = split_mode
224
self.model_params.main_gpu = main_gpu
225
- self.model_params.rpc_servers = rpc_servers.encode('utf-8')
+ if rpc_servers is not None:
226
+ self.model_params.rpc_servers = rpc_servers.encode('utf-8')
227
+ self._rpc_servers = rpc_servers
228
+ else:
229
+ self._rpc_servers = None
230
self.tensor_split = tensor_split
231
self._c_tensor_split = None
232
if self.tensor_split is not None:
0 commit comments