Commit fd7bcc9 (parent: 9e1d80f)
llama_cpp/llama.py
@@ -222,7 +222,7 @@ def __init__(
         ) # 0x7FFFFFFF is INT32 max, will be auto set to all layers
         self.model_params.split_mode = split_mode
         self.model_params.main_gpu = main_gpu
-        self.model_params.rpc_servers = rpc_servers
+        self.model_params.rpc_servers = rpc_servers.encode('utf-8')
         self.tensor_split = tensor_split
         self._c_tensor_split = None
         if self.tensor_split is not None:
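
The one-line change matters because the underlying llama_model_params.rpc_servers field is a C string on the ctypes side, which only accepts bytes, so a Python str has to be encoded first. A minimal sketch of that failure mode, using a stand-in structure (FakeModelParams is hypothetical, not the actual llama_cpp bindings):

    # Sketch only: a ctypes struct with a c_char_p field, mirroring the
    # shape of llama_model_params.rpc_servers (assumed to be a C string).
    import ctypes

    class FakeModelParams(ctypes.Structure):
        _fields_ = [("rpc_servers", ctypes.c_char_p)]

    params = FakeModelParams()

    # Assigning a Python str raises TypeError: c_char_p expects bytes.
    try:
        params.rpc_servers = "localhost:50052"
    except TypeError as exc:
        print("str rejected:", exc)

    # Encoding to UTF-8 bytes, as the commit does, is accepted.
    params.rpc_servers = "localhost:50052".encode("utf-8")
    print(params.rpc_servers)  # b'localhost:50052'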