1 parent f30aa20 commit b501665
llama_cpp/llama.py
@@ -308,6 +308,8 @@ def __init__(
         self.tensor_split = tensor_split
         self._p_tensor_split = None
         if self.tensor_split is not None:
+            if len(self.tensor_split) > llama_cpp.LLAMA_MAX_DEVICES:
+                raise ValueError(f"Attempt to split tensors that exceed maximum supported devices. Current LLAMA_MAX_DEVICES={llama_cpp.LLAMA_MAX_DEVICES}")
             # Type conversion and expand the list to the length of LLAMA_MAX_DEVICES
             FloatArray = ctypes.c_float * llama_cpp.LLAMA_MAX_DEVICES
             self._c_tensor_split = FloatArray(
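
For context, a minimal usage sketch of how the new guard surfaces to callers. It assumes the llama-cpp-python Llama constructor; the model path is a placeholder, not part of this commit:

    import llama_cpp
    from llama_cpp import Llama

    # Build a tensor_split list one entry longer than the compiled device limit.
    too_many_devices = [1.0] * (llama_cpp.LLAMA_MAX_DEVICES + 1)

    try:
        # "models/7B/model.bin" is a hypothetical path used only for illustration.
        llm = Llama(model_path="models/7B/model.bin", tensor_split=too_many_devices)
    except ValueError as err:
        # Raised by the check added above, before the ctypes float array is built.
        print(err)

The check runs only when tensor_split is not None, so existing callers that omit the argument are unaffected; an oversized list now fails fast with a clear message instead of overflowing the fixed-size FloatArray.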