8000 ggml-backend: backend-agnostic tensor parallelism by JohannesGaessler · Pull Request #13776 · ggml-org/llama.cpp · GitHub
[go: up one dir, main page]

Skip to content

ggml-backend: backend-agnostic tensor parallelism #13776

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
65 commits
Select commit Hold shift + click to select a range
0c90859
WIP
JohannesGaessler May 17, 2025
838f577
WIP
JohannesGaessler May 17, 2025
aedc3f7
WIP
JohannesGaessler May 17, 2025
99bb015
try fix
JohannesGaessler May 17, 2025
06d7a88
WIP
JohannesGaessler May 17, 2025
0a69555
WIP
JohannesGaessler May 17, 2025
7563db8
WIP
JohannesGaessler May 17, 2025
363e237
WIP
JohannesGaessler May 19, 2025
47e6d24
fix
JohannesGaessler May 20, 2025
751e488
WIP
JohannesGaessler May 20, 2025
3f8f323
WIP
JohannesGaessler May 20, 2025
316ef4e
WIP
JohannesGaessler May 20, 2025
47b228f
try fix
JohannesGaessler May 20, 2025
cf4d0b6
try fix
JohannesGaessler May 20, 2025
bb48a90
try fix
JohannesGaessler May 20, 2025
016405b
WIP
JohannesGaessler May 21, 2025
7c17ff1
WIP
JohannesGaessler May 21, 2025
deda9c2
WIP
JohannesGaessler May 22, 2025
3c1291f
WIP
JohannesGaessler May 22, 2025
16d29fe
WIP
JohannesGaessler May 22, 2025
7468e9d
WIP
JohannesGaessler May 22, 2025
119657a
WIP
JohannesGaessler May 22, 2025
50d2c5e
WIP
JohannesGaessler May 22, 2025
fe2747e
try fix
JohannesGaessler May 22, 2025
6ddf206
try fix
JohannesGaessler May 22, 2025
996d263
WIP
JohannesGaessler May 22, 2025
3a432ab
WIP
JohannesGaessler May 22, 2025
9c6550e
WIP
JohannesGaessler May 23, 2025
2da2cc3
WIP
JohannesGaessler May 23, 2025
67f02bf
WIP
JohannesGaessler May 23, 2025
2e282d5
WIP
JohannesGaessler May 23, 2025
8860122
WIP
JohannesGaessler May 23, 2025
3d96528
WIP
JohannesGaessler May 23, 2025
2d2ef89
WIP
JohannesGaessler May 23, 2025
6b836c8
WIP
JohannesGaessler May 23, 2025
f5a5155
WIP
JohannesGaessler May 23, 2025
cc91ca1
WIP
JohannesGaessler May 23, 2025
6ee4d0e
WIP
JohannesGaessler May 23, 2025
935d652
WIP
JohannesGaessler May 23, 2025
4dacb2f
WIP
JohannesGaessler May 23, 2025
7b7f399
WIP
JohannesGaessler May 23, 2025
aeda7e0
WIP
JohannesGaessler May 23, 2025
95f1caf
WIP
JohannesGaessler May 23, 2025
1f648ba
WIP
JohannesGaessler May 23, 2025
e18d1ef
WIP
JohannesGaessler May 23, 2025
66c8eec
WIP
JohannesGaessler May 23, 2025
206ab58
WIP
JohannesGaessler May 23, 2025
ae1617c
WIP
JohannesGaessler May 24, 2025
f617bbb
WIP
JohannesGaessler May 24, 2025
528dd51
WIP
JohannesGaessler May 24, 2025
4006293
WIP
JohannesGaessler May 24, 2025
943456b
WIP
JohannesGaessler May 24, 2025
25c25ea
WIP
JohannesGaessler May 24, 2025
739d902
WIP
JohannesGaessler May 24, 2025
26807a9
WIP
JohannesGaessler May 24, 2025
1c9dcde
WIP
JohannesGaessler May 24, 2025
3c21fdd
WIP
JohannesGaessler May 24, 2025
9719003
WIP
JohannesGaessler May 24, 2025
1c37a20
WIP
JohannesGaessler May 24, 2025
f6dd08e
WIP
JohannesGaessler May 24, 2025
02e4af1
WIP
JohannesGaessler May 24, 2025
07ca4b8
WIP
JohannesGaessler May 24, 2025
c0358bd
WIP
JohannesGaessler May 24, 2025
ea3cab5
WIP
JohannesGaessler May 24, 2025
027d97e
WIP
JohannesGaessler May 25, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
WIP
  • Loading branch information
JohannesGaessler committed May 17, 2025
commit 0c908590fb49f69a486564f3d188b2dfd0f2b3b2
1 change: 1 addition & 0 deletions ggml/include/ggml-backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ extern "C" {
GGML_API size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor);
GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
GGML_API ggml_backend_dev_t ggml_backend_buft_get_device (ggml_backend_buffer_type_t buft);
GGML_API bool ggml_backend_buft_is_split (ggml_backend_buffer_type_t buft);

//
// Backend buffer
Expand Down
1 change: 1 addition & 0 deletions ggml/src/ggml-backend-impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ extern "C" {
size_t (*get_alloc_size)(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor);
// (optional) check if tensor data is in host memory and uses standard ggml tensor layout (defaults to false)
bool (*is_host) (ggml_backend_buffer_type_t buft);
bool (*is_split) (ggml_backend_buffer_type_t buft);
};

struct ggml_backend_buffer_type {
Expand Down
6 changes: 6 additions & 0 deletions ggml/src/ggml-backend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,10 @@ ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft)
return buft->device;
}

// Report whether this buffer type identifies itself as a "split" buffer type
// (presumably one whose tensor data is partitioned across devices — confirm
// against the split buffer type implementations). The iface.is_split callback
// is optional; a buffer type that does not provide it is treated as not split.
bool ggml_backend_buft_is_split(ggml_backend_buffer_type_t buft) {
    if (buft->iface.is_split == NULL) {
        return false;
    }
    return buft->iface.is_split(buft);
}

// backend buffer

ggml_backend_buffer_t ggml_backend_buffer_init(
Expand Down Expand Up @@ -1971,6 +1975,7 @@ ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
/* .is_host = */ ggml_backend_cpu_buffer_type_is_host,
/* .is_split = */ NULL,
},
/* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
/* .context = */ NULL,
Expand All @@ -1994,6 +1999,7 @@ static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) {
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
/* .is_host = */ ggml_backend_cpu_buffer_type_is_host,
/* .is_split = */ NULL,
},
/* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
/* .context = */ NULL,
Expand Down
5 changes: 5 additions & 0 deletions ggml/src/ggml-cuda/ggml-cuda.cu
Original file line number Diff line number Diff line change
Expand Up @@ -689,13 +689,16 @@ static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_t
GGML_UNUSED(buft);
}

static bool ggml_backend_buft_is_cuda_split(ggml_backend_buffer_type_t buft);

static const ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = {
/* .get_name = */ ggml_backend_cuda_buffer_type_get_name,
/* .alloc_buffer = */ ggml_backend_cuda_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment,
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size,
/* .is_host = */ NULL,
/* .is_split = */ ggml_backend_buft_is_cuda_split,
};

ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) {
Expand Down Expand Up @@ -1013,6 +1016,7 @@ static const ggml_backend_buffer_type_i ggml_backend_cuda_split_buffer_type_inte
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ ggml_backend_cuda_split_buffer_type_get_alloc_size,
/* .is_host = */ ggml_backend_cuda_split_buffer_type_is_host,
/* .is_split = */ ggml_backend_buft_is_cuda_split,
};

ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split) {
Expand Down Expand Up @@ -1111,6 +1115,7 @@ ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() {
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
/* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
/* .is_split = */ NULL,
},
/* .device = */ ggml_backend_reg_dev_get(ggml_backend_cuda_reg(), 0),
/* .context = */ nullptr,
Expand Down
0